2005-09-10 03:10:27 +07:00
|
|
|
/*
|
|
|
|
FUSE: Filesystem in Userspace
|
2008-11-26 18:03:54 +07:00
|
|
|
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
|
2005-09-10 03:10:27 +07:00
|
|
|
|
|
|
|
This program can be distributed under the terms of the GNU GPL.
|
|
|
|
See the file COPYING.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "fuse_i.h"
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/uio.h>
|
|
|
|
#include <linux/miscdevice.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/slab.h>
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be transferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmsplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
#include <linux/pipe_fs_i.h>
|
2010-05-25 20:06:07 +07:00
|
|
|
#include <linux/swap.h>
|
|
|
|
#include <linux/splice.h>
|
2005-09-10 03:10:27 +07:00
|
|
|
|
|
|
|
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
|
driver core: add devname module aliases to allow module on-demand auto-loading
This adds:
alias: devname:<name>
to some common kernel modules, which will allow the on-demand loading
of the kernel module when the device node is accessed.
Ideally all these modules would be compiled-in, but distros seems too
much in love with their modularization that we need to cover the common
cases with this new facility. It will allow us to remove a bunch of pretty
useless init scripts and modprobes from init scripts.
The static device node aliases will be carried in the module itself. The
program depmod will extract this information to a file in the module directory:
$ cat /lib/modules/2.6.34-00650-g537b60d-dirty/modules.devname
# Device nodes to trigger on-demand module loading.
microcode cpu/microcode c10:184
fuse fuse c10:229
ppp_generic ppp c108:0
tun net/tun c10:200
dm_mod mapper/control c10:235
Udev will pick up the depmod created file on startup and create all the
static device nodes which the kernel modules specify, so that these modules
get automatically loaded when the device node is accessed:
$ /sbin/udevd --debug
...
static_dev_create_from_modules: mknod '/dev/cpu/microcode' c10:184
static_dev_create_from_modules: mknod '/dev/fuse' c10:229
static_dev_create_from_modules: mknod '/dev/ppp' c108:0
static_dev_create_from_modules: mknod '/dev/net/tun' c10:200
static_dev_create_from_modules: mknod '/dev/mapper/control' c10:235
udev_rules_apply_static_dev_perms: chmod '/dev/net/tun' 0666
udev_rules_apply_static_dev_perms: chmod '/dev/fuse' 0666
A few device nodes are switched to statically allocated numbers, to allow
the static nodes to work. This might also useful for systems which still run
a plain static /dev, which is completely unsafe to use with any dynamic minor
numbers.
Note:
The devname aliases must be limited to the *common* and *single*instance*
device nodes, like the misc devices, and never be used for conceptually limited
systems like the loop devices, which should rather get fixed properly and get a
control node for losetup to talk to, instead of creating a random number of
device nodes in advance, regardless if they are ever used.
This facility is to hide the mess distros are creating with too modularized
kernels, and just to hide that these modules are not compiled-in, and not to
paper-over broken concepts. Thanks! :)
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Alasdair G Kergon <agk@redhat.com>
Cc: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
Cc: Ian Kent <raven@themaw.net>
Signed-Off-By: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2010-05-20 23:07:20 +07:00
|
|
|
MODULE_ALIAS("devname:fuse");
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2006-12-07 11:33:20 +07:00
|
|
|
static struct kmem_cache *fuse_req_cachep;
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
static struct fuse_dev *fuse_get_dev(struct file *file)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
2006-04-11 12:54:55 +07:00
|
|
|
/*
|
|
|
|
* Lockless access is OK, because file->private data is set
|
|
|
|
* once during mount and is valid until the file is released.
|
|
|
|
*/
|
2015-07-01 21:26:08 +07:00
|
|
|
return ACCESS_ONCE(file->private_data);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
2012-10-26 22:48:07 +07:00
|
|
|
static void fuse_request_init(struct fuse_req *req, struct page **pages,
|
2012-10-26 22:49:24 +07:00
|
|
|
struct fuse_page_desc *page_descs,
|
2012-10-26 22:48:07 +07:00
|
|
|
unsigned npages)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
memset(req, 0, sizeof(*req));
|
2012-10-26 22:48:07 +07:00
|
|
|
memset(pages, 0, sizeof(*pages) * npages);
|
2012-10-26 22:49:24 +07:00
|
|
|
memset(page_descs, 0, sizeof(*page_descs) * npages);
|
2005-09-10 03:10:27 +07:00
|
|
|
INIT_LIST_HEAD(&req->list);
|
2006-06-25 19:48:54 +07:00
|
|
|
INIT_LIST_HEAD(&req->intr_entry);
|
2005-09-10 03:10:27 +07:00
|
|
|
init_waitqueue_head(&req->waitq);
|
|
|
|
atomic_set(&req->count, 1);
|
2012-10-26 22:48:07 +07:00
|
|
|
req->pages = pages;
|
2012-10-26 22:49:24 +07:00
|
|
|
req->page_descs = page_descs;
|
2012-10-26 22:48:07 +07:00
|
|
|
req->max_pages = npages;
|
2015-07-01 21:26:01 +07:00
|
|
|
__set_bit(FR_PENDING, &req->flags);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
2012-10-26 22:48:07 +07:00
|
|
|
/*
 * Allocate and initialize a request together with page arrays for up
 * to @npages pages.  Small requests use the arrays embedded in struct
 * fuse_req; larger ones get separately allocated arrays.  Returns NULL
 * on allocation failure.
 */
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			/*
			 * kmalloc_array() checks npages * size for
			 * multiplication overflow, unlike the open-coded
			 * kmalloc(size * npages, ...) it replaces.
			 */
			pages = kmalloc_array(npages, sizeof(struct page *),
					      flags);
			page_descs = kmalloc_array(npages,
						   sizeof(struct fuse_page_desc),
						   flags);
		}

		if (!pages || !page_descs) {
			/* Only reached for the kmalloc_array() path */
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}
|
2012-10-26 22:48:07 +07:00
|
|
|
|
|
|
|
struct fuse_req *fuse_request_alloc(unsigned npages)
|
|
|
|
{
|
|
|
|
return __fuse_request_alloc(npages, GFP_KERNEL);
|
|
|
|
}
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_request_alloc);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2012-10-26 22:48:07 +07:00
|
|
|
struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 14:54:41 +07:00
|
|
|
{
|
2012-10-26 22:48:07 +07:00
|
|
|
return __fuse_request_alloc(npages, GFP_NOFS);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 14:54:41 +07:00
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
void fuse_request_free(struct fuse_req *req)
|
|
|
|
{
|
2012-10-26 22:49:24 +07:00
|
|
|
if (req->pages != req->inline_pages) {
|
2012-10-26 22:48:07 +07:00
|
|
|
kfree(req->pages);
|
2012-10-26 22:49:24 +07:00
|
|
|
kfree(req->page_descs);
|
|
|
|
}
|
2005-09-10 03:10:27 +07:00
|
|
|
kmem_cache_free(fuse_req_cachep, req);
|
|
|
|
}
|
|
|
|
|
2012-12-14 22:20:51 +07:00
|
|
|
void __fuse_get_request(struct fuse_req *req)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
atomic_inc(&req->count);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Must be called with > 1 refcount */
|
|
|
|
static void __fuse_put_request(struct fuse_req *req)
|
|
|
|
{
|
|
|
|
BUG_ON(atomic_read(&req->count) < 2);
|
|
|
|
atomic_dec(&req->count);
|
|
|
|
}
|
|
|
|
|
2006-06-25 19:48:52 +07:00
|
|
|
static void fuse_req_init_context(struct fuse_req *req)
|
|
|
|
{
|
2012-02-08 07:26:03 +07:00
|
|
|
req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
|
|
|
|
req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
|
2006-06-25 19:48:52 +07:00
|
|
|
req->in.h.pid = current->pid;
|
|
|
|
}
|
|
|
|
|
2015-01-06 16:45:35 +07:00
|
|
|
void fuse_set_initialized(struct fuse_conn *fc)
|
|
|
|
{
|
|
|
|
/* Make sure stores before this are seen on another CPU */
|
|
|
|
smp_wmb();
|
|
|
|
fc->initialized = 1;
|
|
|
|
}
|
|
|
|
|
2013-03-21 21:02:28 +07:00
|
|
|
/*
 * Should request allocation wait?  Everyone waits until the connection
 * is initialized; background requests additionally wait while the
 * connection is blocked.
 */
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
|
|
|
|
|
2013-03-21 21:02:04 +07:00
|
|
|
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
|
|
|
|
bool for_background)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
2006-04-11 12:54:59 +07:00
|
|
|
struct fuse_req *req;
|
|
|
|
int err;
|
2006-04-12 02:16:09 +07:00
|
|
|
atomic_inc(&fc->num_waiting);
|
2013-03-21 21:02:28 +07:00
|
|
|
|
|
|
|
if (fuse_block_alloc(fc, for_background)) {
|
|
|
|
err = -EINTR;
|
2016-07-19 14:08:27 +07:00
|
|
|
if (wait_event_killable_exclusive(fc->blocked_waitq,
|
|
|
|
!fuse_block_alloc(fc, for_background)))
|
2013-03-21 21:02:28 +07:00
|
|
|
goto out;
|
|
|
|
}
|
2015-01-06 16:45:35 +07:00
|
|
|
/* Matches smp_wmb() in fuse_set_initialized() */
|
|
|
|
smp_rmb();
|
2006-04-11 12:54:59 +07:00
|
|
|
|
2006-06-25 19:48:50 +07:00
|
|
|
err = -ENOTCONN;
|
|
|
|
if (!fc->connected)
|
|
|
|
goto out;
|
|
|
|
|
2015-07-01 21:25:57 +07:00
|
|
|
err = -ECONNREFUSED;
|
|
|
|
if (fc->conn_error)
|
|
|
|
goto out;
|
|
|
|
|
2012-10-26 22:48:30 +07:00
|
|
|
req = fuse_request_alloc(npages);
|
2006-04-12 02:16:09 +07:00
|
|
|
err = -ENOMEM;
|
2013-03-21 21:02:36 +07:00
|
|
|
if (!req) {
|
|
|
|
if (for_background)
|
|
|
|
wake_up(&fc->blocked_waitq);
|
2006-04-12 02:16:09 +07:00
|
|
|
goto out;
|
2013-03-21 21:02:36 +07:00
|
|
|
}
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2006-06-25 19:48:52 +07:00
|
|
|
fuse_req_init_context(req);
|
2015-07-01 21:25:58 +07:00
|
|
|
__set_bit(FR_WAITING, &req->flags);
|
|
|
|
if (for_background)
|
|
|
|
__set_bit(FR_BACKGROUND, &req->flags);
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
return req;
|
2006-04-12 02:16:09 +07:00
|
|
|
|
|
|
|
out:
|
|
|
|
atomic_dec(&fc->num_waiting);
|
|
|
|
return ERR_PTR(err);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
2013-03-21 21:02:04 +07:00
|
|
|
|
|
|
|
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
|
|
|
|
{
|
|
|
|
return __fuse_get_req(fc, npages, false);
|
|
|
|
}
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_get_req);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2013-03-21 21:02:04 +07:00
|
|
|
struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
|
|
|
|
unsigned npages)
|
|
|
|
{
|
|
|
|
return __fuse_get_req(fc, npages, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
|
|
|
|
|
2006-06-25 19:48:52 +07:00
|
|
|
/*
|
|
|
|
* Return request in fuse_file->reserved_req. However that may
|
|
|
|
* currently be in use. If that is the case, wait for it to become
|
|
|
|
* available.
|
|
|
|
*/
|
|
|
|
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
|
|
|
|
struct file *file)
|
|
|
|
{
|
|
|
|
struct fuse_req *req = NULL;
|
|
|
|
struct fuse_file *ff = file->private_data;
|
|
|
|
|
|
|
|
do {
|
2007-10-17 13:31:00 +07:00
|
|
|
wait_event(fc->reserved_req_waitq, ff->reserved_req);
|
2006-06-25 19:48:52 +07:00
|
|
|
spin_lock(&fc->lock);
|
|
|
|
if (ff->reserved_req) {
|
|
|
|
req = ff->reserved_req;
|
|
|
|
ff->reserved_req = NULL;
|
2012-08-28 01:48:26 +07:00
|
|
|
req->stolen_file = get_file(file);
|
2006-06-25 19:48:52 +07:00
|
|
|
}
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
} while (!req);
|
|
|
|
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Put stolen request back into fuse_file->reserved_req
|
|
|
|
*/
|
|
|
|
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
|
|
|
|
{
|
|
|
|
struct file *file = req->stolen_file;
|
|
|
|
struct fuse_file *ff = file->private_data;
|
|
|
|
|
|
|
|
spin_lock(&fc->lock);
|
2012-10-26 22:49:24 +07:00
|
|
|
fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
|
2006-06-25 19:48:52 +07:00
|
|
|
BUG_ON(ff->reserved_req);
|
|
|
|
ff->reserved_req = req;
|
2007-10-17 13:31:00 +07:00
|
|
|
wake_up_all(&fc->reserved_req_waitq);
|
2006-06-25 19:48:52 +07:00
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
fput(file);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Gets a requests for a file operation, always succeeds
|
|
|
|
*
|
|
|
|
* This is used for sending the FLUSH request, which must get to
|
|
|
|
* userspace, due to POSIX locks which may need to be unlocked.
|
|
|
|
*
|
|
|
|
* If allocation fails due to OOM, use the reserved request in
|
|
|
|
* fuse_file.
|
|
|
|
*
|
|
|
|
* This is very unlikely to deadlock accidentally, since the
|
|
|
|
* filesystem should not have it's own file open. If deadlock is
|
|
|
|
* intentional, it can still be broken by "aborting" the filesystem.
|
|
|
|
*/
|
2012-10-26 22:48:30 +07:00
|
|
|
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
|
|
|
|
struct file *file)
|
2006-06-25 19:48:52 +07:00
|
|
|
{
|
|
|
|
struct fuse_req *req;
|
|
|
|
|
|
|
|
atomic_inc(&fc->num_waiting);
|
2013-03-21 21:02:28 +07:00
|
|
|
wait_event(fc->blocked_waitq, fc->initialized);
|
2015-01-06 16:45:35 +07:00
|
|
|
/* Matches smp_wmb() in fuse_set_initialized() */
|
|
|
|
smp_rmb();
|
2012-10-26 22:48:30 +07:00
|
|
|
req = fuse_request_alloc(0);
|
2006-06-25 19:48:52 +07:00
|
|
|
if (!req)
|
|
|
|
req = get_reserved_req(fc, file);
|
|
|
|
|
|
|
|
fuse_req_init_context(req);
|
2015-07-01 21:25:58 +07:00
|
|
|
__set_bit(FR_WAITING, &req->flags);
|
|
|
|
__clear_bit(FR_BACKGROUND, &req->flags);
|
2006-06-25 19:48:52 +07:00
|
|
|
return req;
|
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
|
2006-02-05 14:27:40 +07:00
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&req->count)) {
|
2015-07-01 21:25:58 +07:00
|
|
|
if (test_bit(FR_BACKGROUND, &req->flags)) {
|
2013-03-21 21:02:36 +07:00
|
|
|
/*
|
|
|
|
* We get here in the unlikely case that a background
|
|
|
|
* request was allocated but not sent
|
|
|
|
*/
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
if (!fc->blocked)
|
|
|
|
wake_up(&fc->blocked_waitq);
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
}
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
if (test_bit(FR_WAITING, &req->flags)) {
|
|
|
|
__clear_bit(FR_WAITING, &req->flags);
|
2006-04-12 02:16:09 +07:00
|
|
|
atomic_dec(&fc->num_waiting);
|
2015-07-01 21:25:56 +07:00
|
|
|
}
|
2006-06-25 19:48:52 +07:00
|
|
|
|
|
|
|
if (req->stolen_file)
|
|
|
|
put_reserved_req(fc, req);
|
|
|
|
else
|
|
|
|
fuse_request_free(req);
|
2006-02-05 14:27:40 +07:00
|
|
|
}
|
|
|
|
}
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_put_request);
|
2006-02-05 14:27:40 +07:00
|
|
|
|
2008-02-06 16:38:39 +07:00
|
|
|
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
|
|
|
|
{
|
|
|
|
unsigned nbytes = 0;
|
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (i = 0; i < numargs; i++)
|
|
|
|
nbytes += args[i].size;
|
|
|
|
|
|
|
|
return nbytes;
|
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
|
2008-02-06 16:38:39 +07:00
|
|
|
{
|
2015-07-01 21:26:01 +07:00
|
|
|
return ++fiq->reqctr;
|
2008-02-06 16:38:39 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
|
2008-02-06 16:38:39 +07:00
|
|
|
{
|
|
|
|
req->in.h.len = sizeof(struct fuse_in_header) +
|
|
|
|
len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
|
2015-07-01 21:26:01 +07:00
|
|
|
list_add_tail(&req->list, &fiq->pending);
|
2015-07-01 21:26:02 +07:00
|
|
|
wake_up_locked(&fiq->waitq);
|
2015-07-01 21:26:01 +07:00
|
|
|
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
|
2008-02-06 16:38:39 +07:00
|
|
|
}
|
|
|
|
|
2010-12-08 02:16:56 +07:00
|
|
|
/*
 * Queue a FORGET for @nodeid on the input queue's singly linked forget
 * list.  If the connection is gone, nobody will ever read it, so the
 * link is simply freed.
 */
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}
|
|
|
|
|
2008-02-06 16:38:39 +07:00
|
|
|
static void flush_bg_queue(struct fuse_conn *fc)
|
|
|
|
{
|
2009-07-02 07:28:41 +07:00
|
|
|
while (fc->active_background < fc->max_background &&
|
2008-02-06 16:38:39 +07:00
|
|
|
!list_empty(&fc->bg_queue)) {
|
|
|
|
struct fuse_req *req;
|
2015-07-01 21:26:01 +07:00
|
|
|
struct fuse_iqueue *fiq = &fc->iq;
|
2008-02-06 16:38:39 +07:00
|
|
|
|
|
|
|
req = list_entry(fc->bg_queue.next, struct fuse_req, list);
|
|
|
|
list_del(&req->list);
|
|
|
|
fc->active_background++;
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_lock(&fiq->waitq.lock);
|
2015-07-01 21:26:01 +07:00
|
|
|
req->in.h.unique = fuse_get_unique(fiq);
|
|
|
|
queue_request(fiq, req);
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2008-02-06 16:38:39 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/*
|
|
|
|
* This function is called when a request is finished. Either a reply
|
2006-06-25 19:48:53 +07:00
|
|
|
* has arrived or it was aborted (and not yet sent) or some error
|
2006-01-17 13:14:26 +07:00
|
|
|
* occurred during communication with userspace, or the device file
|
2006-06-25 19:48:50 +07:00
|
|
|
* was closed. The requester thread is woken up (if still waiting),
|
|
|
|
* the 'end' callback is called if given, else the reference to the
|
|
|
|
* request is released
|
2005-09-10 03:10:27 +07:00
|
|
|
*/
|
|
|
|
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
|
|
|
|
{
|
2015-07-01 21:26:02 +07:00
|
|
|
struct fuse_iqueue *fiq = &fc->iq;
|
2015-07-01 21:26:06 +07:00
|
|
|
|
2015-07-01 21:26:07 +07:00
|
|
|
if (test_and_set_bit(FR_FINISHED, &req->flags))
|
2015-07-01 21:26:06 +07:00
|
|
|
return;
|
|
|
|
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_lock(&fiq->waitq.lock);
|
2015-07-01 21:25:58 +07:00
|
|
|
list_del_init(&req->intr_entry);
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2015-07-01 21:26:01 +07:00
|
|
|
WARN_ON(test_bit(FR_PENDING, &req->flags));
|
|
|
|
WARN_ON(test_bit(FR_SENT, &req->flags));
|
2015-07-01 21:25:58 +07:00
|
|
|
if (test_bit(FR_BACKGROUND, &req->flags)) {
|
2015-07-01 21:26:07 +07:00
|
|
|
spin_lock(&fc->lock);
|
2015-07-01 21:25:58 +07:00
|
|
|
clear_bit(FR_BACKGROUND, &req->flags);
|
2013-03-21 21:02:36 +07:00
|
|
|
if (fc->num_background == fc->max_background)
|
2006-06-25 19:48:50 +07:00
|
|
|
fc->blocked = 0;
|
2013-03-21 21:02:36 +07:00
|
|
|
|
|
|
|
/* Wake up next waiter, if any */
|
2013-04-18 02:50:58 +07:00
|
|
|
if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
|
2013-03-21 21:02:36 +07:00
|
|
|
wake_up(&fc->blocked_waitq);
|
|
|
|
|
2009-07-02 07:28:41 +07:00
|
|
|
if (fc->num_background == fc->congestion_threshold &&
|
2009-04-14 08:54:52 +07:00
|
|
|
fc->connected && fc->bdi_initialized) {
|
2009-07-09 19:52:32 +07:00
|
|
|
clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
|
|
|
|
clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
|
2007-10-17 13:30:59 +07:00
|
|
|
}
|
2006-06-25 19:48:50 +07:00
|
|
|
fc->num_background--;
|
2008-02-06 16:38:39 +07:00
|
|
|
fc->active_background--;
|
|
|
|
flush_bg_queue(fc);
|
2015-07-01 21:26:07 +07:00
|
|
|
spin_unlock(&fc->lock);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
2006-06-25 19:48:50 +07:00
|
|
|
wake_up(&req->waitq);
|
2015-07-01 21:26:07 +07:00
|
|
|
if (req->end)
|
|
|
|
req->end(fc, req);
|
2008-11-26 18:03:54 +07:00
|
|
|
fuse_put_request(fc, req);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
|
2006-06-25 19:48:54 +07:00
|
|
|
{
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_lock(&fiq->waitq.lock);
|
2015-07-01 21:26:03 +07:00
|
|
|
if (list_empty(&req->intr_entry)) {
|
|
|
|
list_add_tail(&req->intr_entry, &fiq->interrupts);
|
|
|
|
wake_up_locked(&fiq->waitq);
|
|
|
|
}
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2015-07-01 21:26:01 +07:00
|
|
|
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
|
2006-06-25 19:48:54 +07:00
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:39 +07:00
|
|
|
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
2015-07-01 21:26:02 +07:00
|
|
|
struct fuse_iqueue *fiq = &fc->iq;
|
2015-07-01 21:26:00 +07:00
|
|
|
int err;
|
|
|
|
|
2006-06-25 19:48:54 +07:00
|
|
|
if (!fc->no_interrupt) {
|
|
|
|
/* Any signal may interrupt this */
|
2015-07-01 21:26:00 +07:00
|
|
|
err = wait_event_interruptible(req->waitq,
|
2015-07-01 21:26:01 +07:00
|
|
|
test_bit(FR_FINISHED, &req->flags));
|
2015-07-01 21:26:00 +07:00
|
|
|
if (!err)
|
2006-06-25 19:48:54 +07:00
|
|
|
return;
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
set_bit(FR_INTERRUPTED, &req->flags);
|
2015-07-01 21:26:03 +07:00
|
|
|
/* matches barrier in fuse_dev_do_read() */
|
|
|
|
smp_mb__after_atomic();
|
2015-07-01 21:26:01 +07:00
|
|
|
if (test_bit(FR_SENT, &req->flags))
|
2015-07-01 21:26:02 +07:00
|
|
|
queue_interrupt(fiq, req);
|
2006-06-25 19:48:54 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
if (!test_bit(FR_FORCE, &req->flags)) {
|
2006-06-25 19:48:54 +07:00
|
|
|
/* Only fatal signals may interrupt this */
|
2016-07-19 14:08:27 +07:00
|
|
|
err = wait_event_killable(req->waitq,
|
2015-07-01 21:26:01 +07:00
|
|
|
test_bit(FR_FINISHED, &req->flags));
|
2015-07-01 21:26:00 +07:00
|
|
|
if (!err)
|
2007-10-17 13:31:04 +07:00
|
|
|
return;
|
|
|
|
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_lock(&fiq->waitq.lock);
|
2007-10-17 13:31:04 +07:00
|
|
|
/* Request is not yet in userspace, bail out */
|
2015-07-01 21:26:01 +07:00
|
|
|
if (test_bit(FR_PENDING, &req->flags)) {
|
2007-10-17 13:31:04 +07:00
|
|
|
list_del(&req->list);
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2007-10-17 13:31:04 +07:00
|
|
|
__fuse_put_request(req);
|
|
|
|
req->out.h.error = -EINTR;
|
|
|
|
return;
|
|
|
|
}
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2006-06-25 19:48:50 +07:00
|
|
|
}
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2007-10-17 13:31:04 +07:00
|
|
|
/*
|
|
|
|
* Either request is already in userspace, or it was forced.
|
|
|
|
* Wait it out.
|
|
|
|
*/
|
2015-07-01 21:26:01 +07:00
|
|
|
wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
2013-02-04 20:04:44 +07:00
|
|
|
/*
 * Queue a synchronous (foreground) request on the input queue and wait
 * for the answer.  On a dead connection the request is failed with
 * -ENOTCONN without being queued.
 */
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	/* Background requests must go through the bg_queue path instead */
	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}
|
2013-02-04 20:04:44 +07:00
|
|
|
|
|
|
|
/*
 * Send a request and wait for the reply.
 *
 * Marks the request as expecting a reply (FR_ISREPLY) and accounts it
 * in fc->num_waiting (once per request, guarded by FR_WAITING) before
 * handing it to __fuse_request_send().
 */
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		/* First submission of this request: count it as waiting */
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2015-01-06 16:45:35 +07:00
|
|
|
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
|
|
|
|
{
|
|
|
|
if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
|
|
|
|
args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
|
|
|
|
|
|
|
|
if (fc->minor < 9) {
|
|
|
|
switch (args->in.h.opcode) {
|
|
|
|
case FUSE_LOOKUP:
|
|
|
|
case FUSE_CREATE:
|
|
|
|
case FUSE_MKNOD:
|
|
|
|
case FUSE_MKDIR:
|
|
|
|
case FUSE_SYMLINK:
|
|
|
|
case FUSE_LINK:
|
|
|
|
args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
|
|
|
|
break;
|
|
|
|
case FUSE_GETATTR:
|
|
|
|
case FUSE_SETATTR:
|
|
|
|
args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (fc->minor < 12) {
|
|
|
|
switch (args->in.h.opcode) {
|
|
|
|
case FUSE_CREATE:
|
|
|
|
args->in.args[0].size = sizeof(struct fuse_open_in);
|
|
|
|
break;
|
|
|
|
case FUSE_MKNOD:
|
|
|
|
args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-12 15:49:05 +07:00
|
|
|
/*
 * Allocate a request, fill it in from @args, send it synchronously and
 * return the result.
 *
 * Returns the (negative) error from the reply header, or — when the
 * caller asked for a variable-sized reply (out.argvar) — the size of
 * the single output argument.  The request is always released before
 * returning.
 */
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	/* Copy header fields and argument descriptors into the request */
	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		/* Variable-sized replies only make sense for one out-arg */
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
|
|
|
|
|
2015-07-01 21:25:57 +07:00
|
|
|
/*
|
|
|
|
* Called under fc->lock
|
|
|
|
*
|
|
|
|
* fc->connected must have been checked previously
|
|
|
|
*/
|
|
|
|
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	/* Only background requests may use this path */
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		/* First submission: account the request as waiting */
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	/* Stop accepting new requests once the background limit is hit */
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	/* Signal backing-dev congestion at the configured threshold */
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
|
|
|
|
|
2015-07-01 21:25:57 +07:00
|
|
|
/*
 * Send a background (asynchronous) request.
 *
 * If the connection is alive the request is queued under fc->lock;
 * otherwise it is completed immediately with -ENOTCONN via its ->end()
 * callback and released here.
 */
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	/* Background requests must have a completion callback */
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		/* Dead connection: fail the request without queueing it */
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2010-07-12 19:41:40 +07:00
|
|
|
/*
 * Queue a reply to a kernel-initiated notification.
 *
 * The request reuses the @unique id of the original notification and is
 * marked as not expecting an answer (FR_ISREPLY cleared).  Returns 0 on
 * success or -ENODEV if the input queue is no longer connected.
 */
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}
|
|
|
|
|
2012-08-19 19:53:23 +07:00
|
|
|
/*
 * Unconditionally send a FORGET for @nodeid, dropping one lookup count.
 *
 * Uses the "nofail" request allocator and does not wait for or check a
 * reply (FR_ISREPLY is cleared).  @inarg lives on the stack, which is
 * safe because __fuse_request_send() returns only after the request has
 * been consumed.
 */
void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/*
|
|
|
|
* Lock the request. Up to the next unlock_request() there mustn't be
|
|
|
|
* anything that could cause a page-fault. If the request was already
|
2006-06-25 19:48:53 +07:00
|
|
|
* aborted bail out.
|
2005-09-10 03:10:27 +07:00
|
|
|
*/
|
2015-07-01 21:25:58 +07:00
|
|
|
static int lock_request(struct fuse_req *req)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
if (req) {
|
2015-07-01 21:25:58 +07:00
|
|
|
spin_lock(&req->waitq.lock);
|
2015-07-01 21:25:58 +07:00
|
|
|
if (test_bit(FR_ABORTED, &req->flags))
|
2005-09-10 03:10:27 +07:00
|
|
|
err = -ENOENT;
|
|
|
|
else
|
2015-07-01 21:25:58 +07:00
|
|
|
set_bit(FR_LOCKED, &req->flags);
|
2015-07-01 21:25:58 +07:00
|
|
|
spin_unlock(&req->waitq.lock);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2015-07-01 21:25:58 +07:00
|
|
|
* Unlock request. If it was aborted while locked, caller is responsible
|
|
|
|
* for unlocking and ending the request.
|
2005-09-10 03:10:27 +07:00
|
|
|
*/
|
2015-07-01 21:25:58 +07:00
|
|
|
static int unlock_request(struct fuse_req *req)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
2015-07-01 21:25:58 +07:00
|
|
|
int err = 0;
|
2005-09-10 03:10:27 +07:00
|
|
|
if (req) {
|
2015-07-01 21:25:58 +07:00
|
|
|
spin_lock(&req->waitq.lock);
|
2015-07-01 21:25:58 +07:00
|
|
|
if (test_bit(FR_ABORTED, &req->flags))
|
2015-07-01 21:25:58 +07:00
|
|
|
err = -ENOENT;
|
|
|
|
else
|
2015-07-01 21:25:58 +07:00
|
|
|
clear_bit(FR_LOCKED, &req->flags);
|
2015-07-01 21:25:58 +07:00
|
|
|
spin_unlock(&req->waitq.lock);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
2015-07-01 21:25:58 +07:00
|
|
|
return err;
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * State for copying a request to/from userspace, either through an
 * iov_iter or through a set of pipe buffers (splice).
 */
struct fuse_copy_state {
	int write;			/* nonzero: copying TO the current page */
	struct fuse_req *req;		/* request being transferred */
	struct iov_iter *iter;		/* user buffer (non-splice path) */
	struct pipe_buffer *pipebufs;	/* next pipe buffer (splice path) */
	struct pipe_buffer *currbuf;	/* pipe buffer currently in use */
	struct pipe_inode_info *pipe;	/* pipe backing the splice transfer */
	unsigned long nr_segs;		/* remaining/used pipe buffer count */
	struct page *pg;		/* current page being copied */
	unsigned len;			/* bytes left in current page */
	unsigned offset;		/* current offset within pg */
	unsigned move_pages:1;		/* steal pages instead of copying */
};
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
|
2015-04-04 09:06:08 +07:00
|
|
|
struct iov_iter *iter)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
memset(cs, 0, sizeof(*cs));
|
|
|
|
cs->write = write;
|
2015-04-04 09:06:08 +07:00
|
|
|
cs->iter = iter;
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Unmap and put previous page of userspace buffer */
|
2006-01-17 13:14:28 +07:00
|
|
|
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		/* Record how much of the pipe buffer was actually filled */
		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			/* Page was written to: flush caches and mark dirty */
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get another pagefull of userspace buffer, and map it to kernel
|
|
|
|
* address space, and lock request
|
|
|
|
*/
|
|
|
|
static int fuse_copy_fill(struct fuse_copy_state *cs)
|
|
|
|
{
|
2014-07-07 20:28:51 +07:00
|
|
|
struct page *page;
|
2005-09-10 03:10:27 +07:00
|
|
|
int err;
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
err = unlock_request(cs->req);
|
2015-07-01 21:25:58 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
fuse_copy_finish(cs);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
if (cs->pipebufs) {
|
|
|
|
struct pipe_buffer *buf = cs->pipebufs;
|
|
|
|
|
2010-05-25 20:06:07 +07:00
|
|
|
if (!cs->write) {
|
|
|
|
err = buf->ops->confirm(cs->pipe, buf);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
BUG_ON(!cs->nr_segs);
|
|
|
|
cs->currbuf = buf;
|
2014-07-07 20:28:51 +07:00
|
|
|
cs->pg = buf->page;
|
|
|
|
cs->offset = buf->offset;
|
2010-05-25 20:06:07 +07:00
|
|
|
cs->len = buf->len;
|
|
|
|
cs->pipebufs++;
|
|
|
|
cs->nr_segs--;
|
|
|
|
} else {
|
|
|
|
if (cs->nr_segs == cs->pipe->buffers)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
page = alloc_page(GFP_HIGHUSER);
|
|
|
|
if (!page)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
buf->page = page;
|
|
|
|
buf->offset = 0;
|
|
|
|
buf->len = 0;
|
|
|
|
|
|
|
|
cs->currbuf = buf;
|
2014-07-07 20:28:51 +07:00
|
|
|
cs->pg = page;
|
|
|
|
cs->offset = 0;
|
2010-05-25 20:06:07 +07:00
|
|
|
cs->len = PAGE_SIZE;
|
|
|
|
cs->pipebufs++;
|
|
|
|
cs->nr_segs++;
|
|
|
|
}
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
} else {
|
2015-04-04 09:06:08 +07:00
|
|
|
size_t off;
|
|
|
|
err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
2015-04-04 09:06:08 +07:00
|
|
|
BUG_ON(!err);
|
|
|
|
cs->len = err;
|
|
|
|
cs->offset = off;
|
2014-07-07 20:28:51 +07:00
|
|
|
cs->pg = page;
|
2015-04-04 09:06:08 +07:00
|
|
|
cs->offset = off;
|
|
|
|
iov_iter_advance(cs->iter, err);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
return lock_request(cs->req);
|
2005-09-10 03:10:27 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Do as much copy to/from userspace buffer as we can */
|
2006-01-17 13:14:28 +07:00
|
|
|
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
unsigned ncpy = min(*size, cs->len);
|
|
|
|
if (val) {
|
2014-07-07 20:28:51 +07:00
|
|
|
void *pgaddr = kmap_atomic(cs->pg);
|
|
|
|
void *buf = pgaddr + cs->offset;
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
if (cs->write)
|
2014-07-07 20:28:51 +07:00
|
|
|
memcpy(buf, *val, ncpy);
|
2005-09-10 03:10:27 +07:00
|
|
|
else
|
2014-07-07 20:28:51 +07:00
|
|
|
memcpy(*val, buf, ncpy);
|
|
|
|
|
|
|
|
kunmap_atomic(pgaddr);
|
2005-09-10 03:10:27 +07:00
|
|
|
*val += ncpy;
|
|
|
|
}
|
|
|
|
*size -= ncpy;
|
|
|
|
cs->len -= ncpy;
|
2014-07-07 20:28:51 +07:00
|
|
|
cs->offset += ncpy;
|
2005-09-10 03:10:27 +07:00
|
|
|
return ncpy;
|
|
|
|
}
|
|
|
|
|
2010-05-25 20:06:07 +07:00
|
|
|
static int fuse_check_page(struct page *page)
|
|
|
|
{
|
|
|
|
if (page_mapcount(page) ||
|
|
|
|
page->mapping != NULL ||
|
|
|
|
page_count(page) != 1 ||
|
|
|
|
(page->flags & PAGE_FLAGS_CHECK_AT_PREP &
|
|
|
|
~(1 << PG_locked |
|
|
|
|
1 << PG_referenced |
|
|
|
|
1 << PG_uptodate |
|
|
|
|
1 << PG_lru |
|
|
|
|
1 << PG_active |
|
|
|
|
1 << PG_reclaim))) {
|
|
|
|
printk(KERN_WARNING "fuse: trying to steal weird page\n");
|
|
|
|
printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Try to move (steal) a page out of the current pipe buffer and splice
 * it into the page cache in place of *pagep, avoiding a data copy.
 *
 * Returns 0 if the page was moved, 1 if stealing was not possible and
 * the caller should fall back to copying, or a negative error.
 */
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	/* Only whole pages can be moved */
	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	/* Steal the page from the pipe; on failure, copy instead */
	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	/* Swap the new page into oldpage's slot in the page cache */
	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	/* Publish the new page unless the request was aborted meanwhile */
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		/* Aborted: undo the reference taken above */
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	/* Could not steal: set up the buffer for a normal copy */
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}
|
|
|
|
|
2010-05-25 20:06:07 +07:00
|
|
|
/*
 * Add a whole page, by reference, to the pipe buffer list (zero-copy
 * path used when splicing a reply out of the fuse device).
 *
 * Returns 0 on success, -EIO if the pipe is already full, or the error
 * from unlock_request().
 */
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	/* No room for another pipe buffer */
	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	/* Drop the request lock while we are not actively copying */
	err = unlock_request(cs->req);
	if (err)
		return err;

	/* Flush any partially filled buffer before appending a new one */
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	/* The pipe buffer now holds its own reference to the page */
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	/* Zero the whole page up front if only part of it will be copied */
	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/* Splicing a reply: hand the page to the pipe by
			 * reference instead of copying its contents */
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				/* Try to steal the page; err <= 0 means done
				 * (or failed), a positive return falls back
				 * to a plain copy below */
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				/* Refill cs with the next user segment */
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			/* Atomic kmap: fuse_copy_do must not sleep here */
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			/* No backing page: just consume the bytes */
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
|
|
|
|
|
|
|
|
/* Copy pages in the request to/from userspace buffer */
|
|
|
|
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
|
|
|
|
int zeroing)
|
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
struct fuse_req *req = cs->req;
|
|
|
|
|
|
|
|
for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
|
2010-05-25 20:06:07 +07:00
|
|
|
int err;
|
2012-10-26 22:49:33 +07:00
|
|
|
unsigned offset = req->page_descs[i].offset;
|
|
|
|
unsigned count = min(nbytes, req->page_descs[i].length);
|
2010-05-25 20:06:07 +07:00
|
|
|
|
|
|
|
err = fuse_copy_page(cs, &req->pages[i], offset, count,
|
|
|
|
zeroing);
|
2005-09-10 03:10:27 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
nbytes -= count;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy a single argument in the request to/from userspace buffer */
|
|
|
|
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
|
|
|
|
{
|
|
|
|
while (size) {
|
2008-11-26 18:03:54 +07:00
|
|
|
if (!cs->len) {
|
|
|
|
int err = fuse_copy_fill(cs);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
2005-09-10 03:10:27 +07:00
|
|
|
fuse_copy_do(cs, &val, &size);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy request arguments to/from userspace buffer */
|
|
|
|
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
|
|
|
|
unsigned argpages, struct fuse_arg *args,
|
|
|
|
int zeroing)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (i = 0; !err && i < numargs; i++) {
|
|
|
|
struct fuse_arg *arg = &args[i];
|
|
|
|
if (i == numargs - 1 && argpages)
|
|
|
|
err = fuse_copy_pages(cs, arg->size, zeroing);
|
|
|
|
else
|
|
|
|
err = fuse_copy_one(cs, arg->value, arg->size);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
static int forget_pending(struct fuse_iqueue *fiq)
|
2010-12-08 02:16:56 +07:00
|
|
|
{
|
2015-07-01 21:26:01 +07:00
|
|
|
return fiq->forget_list_head.next != NULL;
|
2010-12-08 02:16:56 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
static int request_pending(struct fuse_iqueue *fiq)
|
2006-06-25 19:48:54 +07:00
|
|
|
{
|
2015-07-01 21:26:01 +07:00
|
|
|
return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
|
|
|
|
forget_pending(fiq);
|
2006-06-25 19:48:54 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	/* Take the request off the interrupt list before dropping the lock */
	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	/* The interrupted request is identified by its original unique id */
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
/*
 * Unlink up to @max entries from the head of the forget list.
 *
 * Returns the detached singly linked chain; if @countp is non-NULL it is
 * set to the number of entries actually dequeued.
 */
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	/* Walk at most @max links; newhead ends up pointing at the ->next
	 * field of the last entry to be dequeued */
	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	/* Detach the chain from the queue and NUL-terminate it */
	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	/* Queue drained: reset the tail pointer to the list head */
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
|
|
|
|
|
2015-07-01 21:26:03 +07:00
|
|
|
/*
 * Build and copy out a single FUSE_FORGET message.
 *
 * Called with fiq->waitq.lock held, releases it.
 */
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	/* Message contents were snapshotted above; link can go now */
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
|
|
|
|
|
2015-07-01 21:26:03 +07:00
|
|
|
/*
 * Build and copy out a FUSE_BATCH_FORGET message containing as many
 * queued forgets as fit in the reader's buffer.
 *
 * Called with fiq->waitq.lock held, releases it.
 */
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	/* Dequeue only as many entries as the buffer can carry */
	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	/* Drain the whole chain even after a copy error so every link is
	 * freed exactly once */
	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
|
|
|
|
|
2015-07-01 21:26:03 +07:00
|
|
|
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
|
|
|
|
struct fuse_copy_state *cs,
|
2010-12-08 02:16:56 +07:00
|
|
|
size_t nbytes)
|
2015-07-01 21:26:03 +07:00
|
|
|
__releases(fiq->waitq.lock)
|
2010-12-08 02:16:56 +07:00
|
|
|
{
|
2015-07-01 21:26:01 +07:00
|
|
|
if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
|
2015-07-01 21:26:03 +07:00
|
|
|
return fuse_read_single_forget(fiq, cs, nbytes);
|
2010-12-08 02:16:56 +07:00
|
|
|
else
|
2015-07-01 21:26:03 +07:00
|
|
|
return fuse_read_batch_forget(fiq, cs, nbytes);
|
2010-12-08 02:16:56 +07:00
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	/* Non-blocking read with nothing queued: bail out immediately */
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	/* Sleep (with fiq->waitq.lock dropped/retaken by the helper) until
	 * there is work or the connection goes away */
	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	/* Interrupts have the highest priority; the helper releases
	 * fiq->waitq.lock */
	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	/* Interleave forgets with normal requests: serve up to a batch of
	 * forgets, then let some regular requests through */
	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	/* Park the request on the per-device io list while copying */
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	/* One-way request (e.g. FORGET-like): finish it right away */
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	/* Request may have been interrupted while we were copying it out */
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
|
|
|
|
|
2015-01-12 11:22:16 +07:00
|
|
|
static int fuse_dev_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The fuse device's file's private_data is used to hold
|
|
|
|
* the fuse_conn(ection) when it is mounted, and is used to
|
|
|
|
* keep track of whether the file has been mounted already.
|
|
|
|
*/
|
|
|
|
file->private_data = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-04 08:53:39 +07:00
|
|
|
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
|
2010-05-25 20:06:07 +07:00
|
|
|
{
|
|
|
|
struct fuse_copy_state cs;
|
|
|
|
struct file *file = iocb->ki_filp;
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud = fuse_get_dev(file);
|
|
|
|
|
|
|
|
if (!fud)
|
2010-05-25 20:06:07 +07:00
|
|
|
return -EPERM;
|
|
|
|
|
2015-04-04 08:53:39 +07:00
|
|
|
if (!iter_is_iovec(to))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
fuse_copy_init(&cs, 1, to);
|
2010-05-25 20:06:07 +07:00
|
|
|
|
2015-07-01 21:26:09 +07:00
|
|
|
return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
|
2010-05-25 20:06:07 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * splice() read from the fuse device: read a request into a private
 * pipe_buffer array, then transplant the filled buffers into the pipe.
 */
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	/* Temporary buffer array, handed to fuse_dev_do_read via cs */
	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	/* All cs.nr_segs buffers must fit in the pipe at once */
	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	/* Move the filled buffers (and their page references) into the
	 * pipe's circular buffer */
	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		buf->ops = &nosteal_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	/* Drop references for any buffers not handed over to the pipe */
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
|
|
|
|
|
2008-11-26 18:03:55 +07:00
|
|
|
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
|
|
|
|
struct fuse_copy_state *cs)
|
|
|
|
{
|
|
|
|
struct fuse_notify_poll_wakeup_out outarg;
|
2009-01-26 21:00:59 +07:00
|
|
|
int err = -EINVAL;
|
2008-11-26 18:03:55 +07:00
|
|
|
|
|
|
|
if (size != sizeof(outarg))
|
2009-01-26 21:00:59 +07:00
|
|
|
goto err;
|
2008-11-26 18:03:55 +07:00
|
|
|
|
|
|
|
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
|
|
|
|
if (err)
|
2009-01-26 21:00:59 +07:00
|
|
|
goto err;
|
2008-11-26 18:03:55 +07:00
|
|
|
|
2009-01-26 21:00:59 +07:00
|
|
|
fuse_copy_finish(cs);
|
2008-11-26 18:03:55 +07:00
|
|
|
return fuse_notify_poll_wakeup(fc, &outarg);
|
2009-01-26 21:00:59 +07:00
|
|
|
|
|
|
|
err:
|
|
|
|
fuse_copy_finish(cs);
|
|
|
|
return err;
|
2008-11-26 18:03:55 +07:00
|
|
|
}
|
|
|
|
|
2009-05-31 22:13:57 +07:00
|
|
|
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
|
|
|
|
struct fuse_copy_state *cs)
|
|
|
|
{
|
|
|
|
struct fuse_notify_inval_inode_out outarg;
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
if (size != sizeof(outarg))
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
fuse_copy_finish(cs);
|
|
|
|
|
|
|
|
down_read(&fc->killsb);
|
|
|
|
err = -ENOENT;
|
2010-02-05 18:08:31 +07:00
|
|
|
if (fc->sb) {
|
|
|
|
err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
|
|
|
|
outarg.off, outarg.len);
|
|
|
|
}
|
2009-05-31 22:13:57 +07:00
|
|
|
up_read(&fc->killsb);
|
|
|
|
return err;
|
|
|
|
|
|
|
|
err:
|
|
|
|
fuse_copy_finish(cs);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Handle a FUSE_NOTIFY_INVAL_ENTRY notification: invalidate the dentry
 * for (parent, name) sent by the userspace daemon.
 */
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	/* Total message size must be header + name + NUL exactly */
	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	/* Ensure NUL termination regardless of what userspace sent */
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	/* buf is NULL if allocation failed; kfree(NULL) is a no-op */
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Handle a FUSE_NOTIFY_DELETE notification: like inval_entry, but also
 * carries the child nodeid so only a matching dentry is invalidated.
 */
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	/* Total message size must be header + name + NUL exactly */
	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	/* Ensure NUL termination regardless of what userspace sent */
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	/* buf is NULL if allocation failed; kfree(NULL) is a no-op */
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
|
|
|
|
|
2010-07-12 19:41:40 +07:00
|
|
|
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
|
|
|
|
struct fuse_copy_state *cs)
|
|
|
|
{
|
|
|
|
struct fuse_notify_store_out outarg;
|
|
|
|
struct inode *inode;
|
|
|
|
struct address_space *mapping;
|
|
|
|
u64 nodeid;
|
|
|
|
int err;
|
|
|
|
pgoff_t index;
|
|
|
|
unsigned int offset;
|
|
|
|
unsigned int num;
|
|
|
|
loff_t file_size;
|
|
|
|
loff_t end;
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (size < sizeof(outarg))
|
|
|
|
goto out_finish;
|
|
|
|
|
|
|
|
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
|
|
|
|
if (err)
|
|
|
|
goto out_finish;
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (size - sizeof(outarg) != outarg.size)
|
|
|
|
goto out_finish;
|
|
|
|
|
|
|
|
nodeid = outarg.nodeid;
|
|
|
|
|
|
|
|
down_read(&fc->killsb);
|
|
|
|
|
|
|
|
err = -ENOENT;
|
|
|
|
if (!fc->sb)
|
|
|
|
goto out_up_killsb;
|
|
|
|
|
|
|
|
inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
|
|
|
|
if (!inode)
|
|
|
|
goto out_up_killsb;
|
|
|
|
|
|
|
|
mapping = inode->i_mapping;
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
index = outarg.offset >> PAGE_SHIFT;
|
|
|
|
offset = outarg.offset & ~PAGE_MASK;
|
2010-07-12 19:41:40 +07:00
|
|
|
file_size = i_size_read(inode);
|
|
|
|
end = outarg.offset + outarg.size;
|
|
|
|
if (end > file_size) {
|
|
|
|
file_size = end;
|
|
|
|
fuse_write_update_size(inode, file_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
num = outarg.size;
|
|
|
|
while (num) {
|
|
|
|
struct page *page;
|
|
|
|
unsigned int this_num;
|
|
|
|
|
|
|
|
err = -ENOMEM;
|
|
|
|
page = find_or_create_page(mapping, index,
|
|
|
|
mapping_gfp_mask(mapping));
|
|
|
|
if (!page)
|
|
|
|
goto out_iput;
|
|
|
|
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
|
2010-07-12 19:41:40 +07:00
|
|
|
err = fuse_copy_page(cs, &page, offset, this_num, 0);
|
2014-01-23 01:36:58 +07:00
|
|
|
if (!err && offset == 0 &&
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
(this_num == PAGE_SIZE || file_size == end))
|
2010-07-12 19:41:40 +07:00
|
|
|
SetPageUptodate(page);
|
|
|
|
unlock_page(page);
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
put_page(page);
|
2010-07-12 19:41:40 +07:00
|
|
|
|
|
|
|
if (err)
|
|
|
|
goto out_iput;
|
|
|
|
|
|
|
|
num -= this_num;
|
|
|
|
offset = 0;
|
|
|
|
index++;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
|
|
|
|
out_iput:
|
|
|
|
iput(inode);
|
|
|
|
out_up_killsb:
|
|
|
|
up_read(&fc->killsb);
|
|
|
|
out_finish:
|
|
|
|
fuse_copy_finish(cs);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2010-07-12 19:41:40 +07:00
|
|
|
/*
 * Request-completion callback for a FUSE_NOTIFY_REPLY (retrieve) request:
 * drop the page cache references that fuse_retrieve() took with
 * find_get_page().
 */
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}
|
|
|
|
|
|
|
|
/*
 * Build and send a FUSE_NOTIFY_REPLY request carrying the page cache
 * contents of @inode for the byte range described by @outarg.
 *
 * Pages are only looked up (never created) in the page cache; the walk
 * stops at the first hole or after FUSE_MAX_PAGES_PER_REQ pages, so the
 * reply may cover less than the requested range (the actual length is
 * reported in misc.retrieve_in.size).  The page references taken here
 * are dropped by fuse_retrieve_end() when the request completes, or
 * directly below if sending fails.
 */
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;	/* bytes actually found in the page cache */
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;	/* offset within first page */
	file_size = i_size_read(inode);

	/* Clamp the requested range to the current file size. */
	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;	/* drops the page refs on completion */

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		/* Takes a page reference; released in fuse_retrieve_end(). */
		page = find_get_page(mapping, index);
		if (!page)
			break;	/* hole: send whatever was gathered so far */

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;	/* only the first page can start mid-page */
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);	/* send failed: drop refs now */

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Handle a FUSE_NOTIFY_RETRIEVE message from the userspace filesystem:
 * copy in the fuse_notify_retrieve_out arguments, look up the target
 * inode, and answer with the cached data via fuse_retrieve().
 *
 * Returns 0 on success, -EINVAL for a malformed message, -ENOENT if the
 * superblock is gone or the inode is not present in the inode cache.
 */
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	/* Message fully consumed; release the copy state before blocking. */
	fuse_copy_finish(cs);

	/* killsb held shared keeps fc->sb valid against concurrent unmount. */
	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
|
|
|
|
|
2008-11-26 18:03:55 +07:00
|
|
|
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
|
|
|
|
unsigned int size, struct fuse_copy_state *cs)
|
|
|
|
{
|
2015-02-26 17:45:47 +07:00
|
|
|
/* Don't try to move pages (yet) */
|
|
|
|
cs->move_pages = 0;
|
|
|
|
|
2008-11-26 18:03:55 +07:00
|
|
|
switch (code) {
|
2008-11-26 18:03:55 +07:00
|
|
|
case FUSE_NOTIFY_POLL:
|
|
|
|
return fuse_notify_poll(fc, size, cs);
|
|
|
|
|
2009-05-31 22:13:57 +07:00
|
|
|
case FUSE_NOTIFY_INVAL_INODE:
|
|
|
|
return fuse_notify_inval_inode(fc, size, cs);
|
|
|
|
|
|
|
|
case FUSE_NOTIFY_INVAL_ENTRY:
|
|
|
|
return fuse_notify_inval_entry(fc, size, cs);
|
|
|
|
|
2010-07-12 19:41:40 +07:00
|
|
|
case FUSE_NOTIFY_STORE:
|
|
|
|
return fuse_notify_store(fc, size, cs);
|
|
|
|
|
2010-07-12 19:41:40 +07:00
|
|
|
case FUSE_NOTIFY_RETRIEVE:
|
|
|
|
return fuse_notify_retrieve(fc, size, cs);
|
|
|
|
|
2011-12-07 03:50:06 +07:00
|
|
|
case FUSE_NOTIFY_DELETE:
|
|
|
|
return fuse_notify_delete(fc, size, cs);
|
|
|
|
|
2008-11-26 18:03:55 +07:00
|
|
|
default:
|
2009-01-26 21:00:59 +07:00
|
|
|
fuse_copy_finish(cs);
|
2008-11-26 18:03:55 +07:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/* Look up request on processing list by unique ID */
|
2015-07-01 21:26:04 +07:00
|
|
|
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *cur;
	struct fuse_req *found = NULL;

	/* A request matches on its own unique ID or its interrupt's. */
	list_for_each_entry(cur, &fpq->processing, list) {
		if (cur->in.h.unique == unique ||
		    cur->intr_unique == unique) {
			found = cur;
			break;
		}
	}
	return found;
}
|
|
|
|
|
|
|
|
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
|
|
|
|
unsigned nbytes)
|
|
|
|
{
|
|
|
|
unsigned reqsize = sizeof(struct fuse_out_header);
|
|
|
|
|
|
|
|
if (out->h.error)
|
|
|
|
return nbytes != reqsize ? -EINVAL : 0;
|
|
|
|
|
|
|
|
reqsize += len_args(out->numargs, out->args);
|
|
|
|
|
|
|
|
if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
|
|
|
|
return -EINVAL;
|
|
|
|
else if (reqsize > nbytes) {
|
|
|
|
struct fuse_arg *lastarg = &out->args[out->numargs-1];
|
|
|
|
unsigned diffsize = reqsize - nbytes;
|
|
|
|
if (diffsize > lastarg->size)
|
|
|
|
return -EINVAL;
|
|
|
|
lastarg->size -= diffsize;
|
|
|
|
}
|
|
|
|
return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
|
|
|
|
out->page_zeroing);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write a single reply to a request. First the header is copied from
|
|
|
|
* the write buffer. The request is then searched on the processing
|
|
|
|
* list by the unique ID found in the header. If found, then remove
|
|
|
|
* it from the list and copy the rest of the buffer to the request.
|
|
|
|
* The request is finished by calling request_end()
|
|
|
|
*/
|
2015-07-01 21:26:09 +07:00
|
|
|
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
struct fuse_copy_state *cs, size_t nbytes)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
|
|
|
int err;
|
2015-07-01 21:26:09 +07:00
|
|
|
struct fuse_conn *fc = fud->fc;
|
|
|
|
struct fuse_pqueue *fpq = &fud->pq;
|
2005-09-10 03:10:27 +07:00
|
|
|
struct fuse_req *req;
|
|
|
|
struct fuse_out_header oh;
|
|
|
|
|
|
|
|
if (nbytes < sizeof(struct fuse_out_header))
|
|
|
|
return -EINVAL;
|
|
|
|
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
err = fuse_copy_one(cs, &oh, sizeof(oh));
|
2005-09-10 03:10:27 +07:00
|
|
|
if (err)
|
|
|
|
goto err_finish;
|
2008-11-26 18:03:55 +07:00
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (oh.len != nbytes)
|
|
|
|
goto err_finish;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Zero oh.unique indicates unsolicited notification message
|
|
|
|
* and error contains notification code.
|
|
|
|
*/
|
|
|
|
if (!oh.unique) {
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
|
2008-11-26 18:03:55 +07:00
|
|
|
return err ? err : nbytes;
|
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
err = -EINVAL;
|
2008-11-26 18:03:55 +07:00
|
|
|
if (oh.error <= -1000 || oh.error > 0)
|
2005-09-10 03:10:27 +07:00
|
|
|
goto err_finish;
|
|
|
|
|
2015-07-01 21:26:06 +07:00
|
|
|
spin_lock(&fpq->lock);
|
2006-01-17 13:14:41 +07:00
|
|
|
err = -ENOENT;
|
2015-07-01 21:26:04 +07:00
|
|
|
if (!fpq->connected)
|
2015-07-01 21:26:06 +07:00
|
|
|
goto err_unlock_pq;
|
2006-01-17 13:14:41 +07:00
|
|
|
|
2015-07-01 21:26:04 +07:00
|
|
|
req = request_find(fpq, oh.unique);
|
2005-09-10 03:10:27 +07:00
|
|
|
if (!req)
|
2015-07-01 21:26:06 +07:00
|
|
|
goto err_unlock_pq;
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2006-06-25 19:48:54 +07:00
|
|
|
/* Is it an interrupt reply? */
|
|
|
|
if (req->intr_unique == oh.unique) {
|
2015-07-01 21:26:06 +07:00
|
|
|
spin_unlock(&fpq->lock);
|
|
|
|
|
2006-06-25 19:48:54 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
if (nbytes != sizeof(struct fuse_out_header))
|
2015-07-01 21:26:07 +07:00
|
|
|
goto err_finish;
|
2006-06-25 19:48:54 +07:00
|
|
|
|
|
|
|
if (oh.error == -ENOSYS)
|
|
|
|
fc->no_interrupt = 1;
|
|
|
|
else if (oh.error == -EAGAIN)
|
2015-07-01 21:26:01 +07:00
|
|
|
queue_interrupt(&fc->iq, req);
|
2006-06-25 19:48:54 +07:00
|
|
|
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
fuse_copy_finish(cs);
|
2006-06-25 19:48:54 +07:00
|
|
|
return nbytes;
|
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:01 +07:00
|
|
|
clear_bit(FR_SENT, &req->flags);
|
2015-07-01 21:26:04 +07:00
|
|
|
list_move(&req->list, &fpq->io);
|
2005-09-10 03:10:27 +07:00
|
|
|
req->out.h = oh;
|
2015-07-01 21:25:58 +07:00
|
|
|
set_bit(FR_LOCKED, &req->flags);
|
2015-07-01 21:26:06 +07:00
|
|
|
spin_unlock(&fpq->lock);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
cs->req = req;
|
2010-05-25 20:06:07 +07:00
|
|
|
if (!req->out.page_replace)
|
|
|
|
cs->move_pages = 0;
|
2005-09-10 03:10:27 +07:00
|
|
|
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
err = copy_out_args(cs, &req->out, nbytes);
|
|
|
|
fuse_copy_finish(cs);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2015-07-01 21:26:06 +07:00
|
|
|
spin_lock(&fpq->lock);
|
2015-07-01 21:25:58 +07:00
|
|
|
clear_bit(FR_LOCKED, &req->flags);
|
2015-07-01 21:26:04 +07:00
|
|
|
if (!fpq->connected)
|
2015-07-01 21:25:58 +07:00
|
|
|
err = -ENOENT;
|
|
|
|
else if (err)
|
2005-09-10 03:10:27 +07:00
|
|
|
req->out.h.error = -EIO;
|
2015-07-01 21:26:06 +07:00
|
|
|
if (!test_bit(FR_PRIVATE, &req->flags))
|
|
|
|
list_del_init(&req->list);
|
2015-07-01 21:26:06 +07:00
|
|
|
spin_unlock(&fpq->lock);
|
2015-07-01 21:26:07 +07:00
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
request_end(fc, req);
|
|
|
|
|
|
|
|
return err ? err : nbytes;
|
|
|
|
|
2015-07-01 21:26:06 +07:00
|
|
|
err_unlock_pq:
|
|
|
|
spin_unlock(&fpq->lock);
|
2005-09-10 03:10:27 +07:00
|
|
|
err_finish:
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
fuse_copy_finish(cs);
|
2005-09-10 03:10:27 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-04-04 08:53:39 +07:00
|
|
|
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
{
|
|
|
|
struct fuse_copy_state cs;
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
|
|
|
|
|
|
|
|
if (!fud)
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
return -EPERM;
|
|
|
|
|
2015-04-04 08:53:39 +07:00
|
|
|
if (!iter_is_iovec(from))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
fuse_copy_init(&cs, 0, from);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
|
2015-07-01 21:26:09 +07:00
|
|
|
return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
|
|
|
|
struct file *out, loff_t *ppos,
|
|
|
|
size_t len, unsigned int flags)
|
|
|
|
{
|
|
|
|
unsigned nbuf;
|
|
|
|
unsigned idx;
|
|
|
|
struct pipe_buffer *bufs;
|
|
|
|
struct fuse_copy_state cs;
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud;
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
size_t rem;
|
|
|
|
ssize_t ret;
|
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
fud = fuse_get_dev(out);
|
|
|
|
if (!fud)
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
return -EPERM;
|
|
|
|
|
2010-12-08 02:16:56 +07:00
|
|
|
bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
if (!bufs)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pipe_lock(pipe);
|
|
|
|
nbuf = 0;
|
|
|
|
rem = 0;
|
|
|
|
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
|
|
|
|
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
|
|
|
|
|
|
|
|
ret = -EINVAL;
|
|
|
|
if (rem < len) {
|
|
|
|
pipe_unlock(pipe);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
rem = len;
|
|
|
|
while (rem) {
|
|
|
|
struct pipe_buffer *ibuf;
|
|
|
|
struct pipe_buffer *obuf;
|
|
|
|
|
|
|
|
BUG_ON(nbuf >= pipe->buffers);
|
|
|
|
BUG_ON(!pipe->nrbufs);
|
|
|
|
ibuf = &pipe->bufs[pipe->curbuf];
|
|
|
|
obuf = &bufs[nbuf];
|
|
|
|
|
|
|
|
if (rem >= ibuf->len) {
|
|
|
|
*obuf = *ibuf;
|
|
|
|
ibuf->ops = NULL;
|
|
|
|
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
|
|
|
|
pipe->nrbufs--;
|
|
|
|
} else {
|
|
|
|
ibuf->ops->get(pipe, ibuf);
|
|
|
|
*obuf = *ibuf;
|
|
|
|
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
|
|
|
|
obuf->len = rem;
|
|
|
|
ibuf->offset += obuf->len;
|
|
|
|
ibuf->len -= obuf->len;
|
|
|
|
}
|
|
|
|
nbuf++;
|
|
|
|
rem -= obuf->len;
|
|
|
|
}
|
|
|
|
pipe_unlock(pipe);
|
|
|
|
|
2015-07-01 21:25:58 +07:00
|
|
|
fuse_copy_init(&cs, 0, NULL);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
cs.pipebufs = bufs;
|
2015-04-04 09:06:08 +07:00
|
|
|
cs.nr_segs = nbuf;
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
cs.pipe = pipe;
|
|
|
|
|
2010-05-25 20:06:07 +07:00
|
|
|
if (flags & SPLICE_F_MOVE)
|
|
|
|
cs.move_pages = 1;
|
|
|
|
|
2015-07-01 21:26:09 +07:00
|
|
|
ret = fuse_dev_do_write(fud, &cs, len);
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
|
|
|
|
for (idx = 0; idx < nbuf; idx++) {
|
|
|
|
struct pipe_buffer *buf = &bufs[idx];
|
|
|
|
buf->ops->release(pipe, buf);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
kfree(bufs);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-09-10 03:10:27 +07:00
|
|
|
/*
 * poll() on the fuse device.
 *
 * The device is always writable (replies can always be attempted); it is
 * readable when a request is pending on the input queue, and reports
 * POLLERR when the device handle is invalid or the connection is gone.
 */
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	/* The input queue state is protected by the waitq lock */
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}
|
|
|
|
|
2006-01-17 13:14:41 +07:00
|
|
|
/*
|
|
|
|
* Abort all requests on the given list (pending or processing)
|
|
|
|
*
|
2006-04-11 12:54:55 +07:00
|
|
|
* This function releases and reacquires fc->lock
|
2006-01-17 13:14:41 +07:00
|
|
|
*/
|
2005-09-10 03:10:27 +07:00
|
|
|
static void end_requests(struct fuse_conn *fc, struct list_head *head)
|
|
|
|
{
|
|
|
|
while (!list_empty(head)) {
|
|
|
|
struct fuse_req *req;
|
|
|
|
req = list_entry(head->next, struct fuse_req, list);
|
|
|
|
req->out.h.error = -ECONNABORTED;
|
2015-07-01 21:26:01 +07:00
|
|
|
clear_bit(FR_PENDING, &req->flags);
|
|
|
|
clear_bit(FR_SENT, &req->flags);
|
2015-07-01 21:26:04 +07:00
|
|
|
list_del_init(&req->list);
|
2005-09-10 03:10:27 +07:00
|
|
|
request_end(fc, req);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-02 07:43:52 +07:00
|
|
|
static void end_polls(struct fuse_conn *fc)
|
|
|
|
{
|
|
|
|
struct rb_node *p;
|
|
|
|
|
|
|
|
p = rb_first(&fc->polled_files);
|
|
|
|
|
|
|
|
while (p) {
|
|
|
|
struct fuse_file *ff;
|
|
|
|
ff = rb_entry(p, struct fuse_file, polled_node);
|
|
|
|
wake_up_interruptible_all(&ff->poll_wait);
|
|
|
|
|
|
|
|
p = rb_next(p);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-01-17 13:14:41 +07:00
|
|
|
/*
|
|
|
|
* Abort all requests.
|
|
|
|
*
|
2015-07-01 21:25:59 +07:00
|
|
|
* Emergency exit in case of a malicious or accidental deadlock, or just a hung
|
|
|
|
* filesystem.
|
|
|
|
*
|
|
|
|
* The same effect is usually achievable through killing the filesystem daemon
|
|
|
|
* and all users of the filesystem. The exception is the combination of an
|
|
|
|
* asynchronous request and the tricky deadlock (see
|
|
|
|
* Documentation/filesystems/fuse.txt).
|
2006-01-17 13:14:41 +07:00
|
|
|
*
|
2015-07-01 21:25:59 +07:00
|
|
|
* Aborting requests under I/O goes as follows: 1: Separate out unlocked
|
|
|
|
* requests, they should be finished off immediately. Locked requests will be
|
|
|
|
* finished after unlock; see unlock_request(). 2: Finish off the unlocked
|
|
|
|
* requests. It is possible that some request will finish before we can. This
|
|
|
|
* is OK, the request will in that case be removed from the list before we touch
|
|
|
|
* it.
|
2006-01-17 13:14:41 +07:00
|
|
|
*/
|
|
|
|
void fuse_abort_conn(struct fuse_conn *fc)
|
|
|
|
{
|
2015-07-01 21:26:01 +07:00
|
|
|
struct fuse_iqueue *fiq = &fc->iq;
|
|
|
|
|
2006-04-11 12:54:55 +07:00
|
|
|
spin_lock(&fc->lock);
|
2006-01-17 13:14:41 +07:00
|
|
|
if (fc->connected) {
|
2015-07-01 21:26:09 +07:00
|
|
|
struct fuse_dev *fud;
|
2015-07-01 21:25:59 +07:00
|
|
|
struct fuse_req *req, *next;
|
2015-07-01 21:25:59 +07:00
|
|
|
LIST_HEAD(to_end1);
|
|
|
|
LIST_HEAD(to_end2);
|
2015-07-01 21:25:59 +07:00
|
|
|
|
2006-01-17 13:14:41 +07:00
|
|
|
fc->connected = 0;
|
2006-06-25 19:48:50 +07:00
|
|
|
fc->blocked = 0;
|
2015-01-06 16:45:35 +07:00
|
|
|
fuse_set_initialized(fc);
|
2015-07-01 21:26:09 +07:00
|
|
|
list_for_each_entry(fud, &fc->devices, entry) {
|
|
|
|
struct fuse_pqueue *fpq = &fud->pq;
|
|
|
|
|
|
|
|
spin_lock(&fpq->lock);
|
|
|
|
fpq->connected = 0;
|
|
|
|
list_for_each_entry_safe(req, next, &fpq->io, list) {
|
|
|
|
req->out.h.error = -ECONNABORTED;
|
|
|
|
spin_lock(&req->waitq.lock);
|
|
|
|
set_bit(FR_ABORTED, &req->flags);
|
|
|
|
if (!test_bit(FR_LOCKED, &req->flags)) {
|
|
|
|
set_bit(FR_PRIVATE, &req->flags);
|
|
|
|
list_move(&req->list, &to_end1);
|
|
|
|
}
|
|
|
|
spin_unlock(&req->waitq.lock);
|
2015-07-01 21:26:06 +07:00
|
|
|
}
|
2015-07-01 21:26:09 +07:00
|
|
|
list_splice_init(&fpq->processing, &to_end2);
|
|
|
|
spin_unlock(&fpq->lock);
|
2015-07-01 21:25:59 +07:00
|
|
|
}
|
2015-07-01 21:25:59 +07:00
|
|
|
fc->max_background = UINT_MAX;
|
|
|
|
flush_bg_queue(fc);
|
2015-07-01 21:26:02 +07:00
|
|
|
|
2015-07-01 21:26:02 +07:00
|
|
|
spin_lock(&fiq->waitq.lock);
|
2015-07-01 21:26:02 +07:00
|
|
|
fiq->connected = 0;
|
2015-07-01 21:26:01 +07:00
|
|
|
list_splice_init(&fiq->pending, &to_end2);
|
2015-07-01 21:26:02 +07:00
|
|
|
while (forget_pending(fiq))
|
|
|
|
kfree(dequeue_forget(fiq, 1, NULL));
|
2015-07-01 21:26:02 +07:00
|
|
|
wake_up_all_locked(&fiq->waitq);
|
|
|
|
spin_unlock(&fiq->waitq.lock);
|
2015-07-01 21:26:02 +07:00
|
|
|
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
|
2015-07-01 21:26:08 +07:00
|
|
|
end_polls(fc);
|
|
|
|
wake_up_all(&fc->blocked_waitq);
|
|
|
|
spin_unlock(&fc->lock);
|
2015-07-01 21:26:02 +07:00
|
|
|
|
2015-07-01 21:25:59 +07:00
|
|
|
while (!list_empty(&to_end1)) {
|
|
|
|
req = list_first_entry(&to_end1, struct fuse_req, list);
|
2015-07-01 21:25:59 +07:00
|
|
|
__fuse_get_request(req);
|
2015-07-01 21:26:04 +07:00
|
|
|
list_del_init(&req->list);
|
2015-07-01 21:25:59 +07:00
|
|
|
request_end(fc, req);
|
|
|
|
}
|
2015-07-01 21:25:59 +07:00
|
|
|
end_requests(fc, &to_end2);
|
2015-07-01 21:26:08 +07:00
|
|
|
} else {
|
|
|
|
spin_unlock(&fc->lock);
|
2006-01-17 13:14:41 +07:00
|
|
|
}
|
|
|
|
}
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_abort_conn);
|
2006-01-17 13:14:41 +07:00
|
|
|
|
2009-04-14 08:54:53 +07:00
|
|
|
int fuse_dev_release(struct inode *inode, struct file *file)
|
2005-09-10 03:10:27 +07:00
|
|
|
{
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud = fuse_get_dev(file);
|
|
|
|
|
|
|
|
if (fud) {
|
|
|
|
struct fuse_conn *fc = fud->fc;
|
2015-07-01 21:26:09 +07:00
|
|
|
struct fuse_pqueue *fpq = &fud->pq;
|
|
|
|
|
|
|
|
WARN_ON(!list_empty(&fpq->io));
|
|
|
|
end_requests(fc, &fpq->processing);
|
|
|
|
/* Are we the last open device? */
|
|
|
|
if (atomic_dec_and_test(&fc->dev_count)) {
|
|
|
|
WARN_ON(fc->iq.fasync != NULL);
|
|
|
|
fuse_abort_conn(fc);
|
|
|
|
}
|
2015-07-01 21:26:08 +07:00
|
|
|
fuse_dev_free(fud);
|
2006-04-11 12:54:52 +07:00
|
|
|
}
|
2005-09-10 03:10:27 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_dev_release);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
2006-04-11 12:54:52 +07:00
|
|
|
static int fuse_dev_fasync(int fd, struct file *file, int on)
|
|
|
|
{
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud = fuse_get_dev(file);
|
|
|
|
|
|
|
|
if (!fud)
|
2006-04-11 12:54:56 +07:00
|
|
|
return -EPERM;
|
2006-04-11 12:54:52 +07:00
|
|
|
|
|
|
|
/* No locking - fasync_helper does its own locking */
|
2015-07-01 21:26:08 +07:00
|
|
|
return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
|
2006-04-11 12:54:52 +07:00
|
|
|
}
|
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
|
|
|
|
{
|
2015-07-01 21:26:08 +07:00
|
|
|
struct fuse_dev *fud;
|
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
if (new->private_data)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
fud = fuse_dev_alloc(fc);
|
|
|
|
if (!fud)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
new->private_data = fud;
|
2015-07-01 21:26:09 +07:00
|
|
|
atomic_inc(&fc->dev_count);
|
2015-07-01 21:26:08 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
|
|
|
{
|
|
|
|
int err = -ENOTTY;
|
|
|
|
|
|
|
|
if (cmd == FUSE_DEV_IOC_CLONE) {
|
|
|
|
int oldfd;
|
|
|
|
|
|
|
|
err = -EFAULT;
|
|
|
|
if (!get_user(oldfd, (__u32 __user *) arg)) {
|
|
|
|
struct file *old = fget(oldfd);
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (old) {
|
2015-08-17 01:27:01 +07:00
|
|
|
struct fuse_dev *fud = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check against file->f_op because CUSE
|
|
|
|
* uses the same ioctl handler.
|
|
|
|
*/
|
|
|
|
if (old->f_op == file->f_op &&
|
|
|
|
old->f_cred->user_ns == file->f_cred->user_ns)
|
|
|
|
fud = fuse_get_dev(old);
|
2015-07-01 21:26:08 +07:00
|
|
|
|
2015-07-01 21:26:08 +07:00
|
|
|
if (fud) {
|
2015-07-01 21:26:08 +07:00
|
|
|
mutex_lock(&fuse_mutex);
|
2015-07-01 21:26:08 +07:00
|
|
|
err = fuse_device_clone(fud->fc, file);
|
2015-07-01 21:26:08 +07:00
|
|
|
mutex_unlock(&fuse_mutex);
|
|
|
|
}
|
|
|
|
fput(old);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-03-28 16:56:42 +07:00
|
|
|
const struct file_operations fuse_dev_operations = {
|
2005-09-10 03:10:27 +07:00
|
|
|
.owner = THIS_MODULE,
|
2015-01-12 11:22:16 +07:00
|
|
|
.open = fuse_dev_open,
|
2005-09-10 03:10:27 +07:00
|
|
|
.llseek = no_llseek,
|
2015-04-04 08:53:39 +07:00
|
|
|
.read_iter = fuse_dev_read,
|
2010-05-25 20:06:07 +07:00
|
|
|
.splice_read = fuse_dev_splice_read,
|
2015-04-04 08:53:39 +07:00
|
|
|
.write_iter = fuse_dev_write,
|
fuse: support splice() writing to fuse device
Allow userspace filesystem implementation to use splice() to write to
the fuse device. The semantics of using splice() are:
1) buffer the message header and data in a temporary pipe
2) with a *single* splice() call move the message from the temporary pipe
to the fuse device
The READ reply message has the most interesting use for this, since
now the data from an arbitrary file descriptor (which could be a
regular file, a block device or a socket) can be tranferred into the
fuse device without having to go through a userspace buffer. It will
also allow zero copy moving of pages.
One caveat is that the protocol on the fuse device requires the length
of the whole message to be written into the header. But the length of
the data transferred into the temporary pipe may not be known in
advance. The current library implementation works around this by
using vmplice to write the header and modifying the header after
splicing the data into the pipe (error handling omitted):
struct fuse_out_header out;
iov.iov_base = &out;
iov.iov_len = sizeof(struct fuse_out_header);
vmsplice(pip[1], &iov, 1, 0);
len = splice(input_fd, input_offset, pip[1], NULL, len, 0);
/* retrospectively modify the header: */
out.len = len + sizeof(struct fuse_out_header);
splice(pip[0], NULL, fuse_chan_fd(req->ch), NULL, out.len, flags);
This works since vmsplice only saves a pointer to the data, it does
not copy the data itself.
Since pipes are currently limited to 16 pages and messages need to be
spliced atomically, the length of the data is limited to 15 pages (or
60kB for 4k pages).
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
2010-05-25 20:06:06 +07:00
|
|
|
.splice_write = fuse_dev_splice_write,
|
2005-09-10 03:10:27 +07:00
|
|
|
.poll = fuse_dev_poll,
|
|
|
|
.release = fuse_dev_release,
|
2006-04-11 12:54:52 +07:00
|
|
|
.fasync = fuse_dev_fasync,
|
2015-07-01 21:26:08 +07:00
|
|
|
.unlocked_ioctl = fuse_dev_ioctl,
|
|
|
|
.compat_ioctl = fuse_dev_ioctl,
|
2005-09-10 03:10:27 +07:00
|
|
|
};
|
2009-04-14 08:54:53 +07:00
|
|
|
EXPORT_SYMBOL_GPL(fuse_dev_operations);
|
2005-09-10 03:10:27 +07:00
|
|
|
|
|
|
|
static struct miscdevice fuse_miscdevice = {
|
|
|
|
.minor = FUSE_MINOR,
|
|
|
|
.name = "fuse",
|
|
|
|
.fops = &fuse_dev_operations,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Module-init-time setup for the fuse device: create the request
 * slab cache and register the /dev/fuse misc device.
 * Returns 0 on success or a negative errno; on failure nothing is
 * left registered.
 */
int __init fuse_dev_init(void)
{
	int err;

	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		return -ENOMEM;

	err = misc_register(&fuse_miscdevice);
	if (err) {
		/* Roll back the cache so a retry starts clean */
		kmem_cache_destroy(fuse_req_cachep);
		return err;
	}

	return 0;
}
|
|
|
|
|
|
|
|
void fuse_dev_cleanup(void)
|
|
|
|
{
|
|
|
|
misc_deregister(&fuse_miscdevice);
|
|
|
|
kmem_cache_destroy(fuse_req_cachep);
|
|
|
|
}
|