fuse fixes for 5.4-rc6

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQSQHSd0lITzzeNWNm3h3BK/laaZPAUCXbgx9wAKCRDh3BK/laaZ
PIjMAQCTrWPf6pLHSoq+Pll7b8swu1m2mW97fj5k8coED1/DSQEA4222PkIdMmhu
qyHnoX0fTtYXg6NnHbDVWsNL4uG8YAM=
=RE9K
-----END PGP SIGNATURE-----

Merge tag 'fuse-fixes-5.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse fixes from Miklos Szeredi:
 "Mostly virtiofs fixes, but also fixes a regression and a couple of
  longstanding data/metadata writeback ordering issues"

* tag 'fuse-fixes-5.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  fuse: redundant get_fuse_inode() calls in fuse_writepages_fill()
  fuse: Add changelog entries for protocols 7.1 - 7.8
  fuse: truncate pending writes on O_TRUNC
  fuse: flush dirty data/metadata before non-truncate setattr
  virtiofs: Remove set but not used variable 'fc'
  virtiofs: Retry request submission from worker context
  virtiofs: Count pending forgets as in_flight forgets
  virtiofs: Set FR_SENT flag only after request has been sent
  virtiofs: No need to check fpq->connected state
  virtiofs: Do not end request in submission context
  fuse: don't advise readdirplus for negative lookup
  fuse: don't dereference req->args on finished request
  virtio-fs: don't show mount options
  virtio-fs: Change module name to virtiofs.ko
commit 23fdb198ae
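
Several of the virtiofs changes below converge on one idea: the request submission path must not sleep, busy-retry, or end a request directly, so a full virtqueue or a failed send is parked on a per-queue list and handled later by the dispatch worker. The following is a minimal userspace sketch of that pattern only, not kernel code; the pthread-based mock queue, QUEUE_DEPTH, and the names submit/dispatch_work are invented for illustration.

/*
 * Sketch: submission never blocks or completes requests; a worker
 * retries whatever could not be placed on the (bounded) queue.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define QUEUE_DEPTH 2			/* tiny on purpose, to force the retry path */

struct request {
	int id;
	struct request *next;
};

static struct request *ring[QUEUE_DEPTH];	/* stand-in for the virtqueue */
static int ring_used;
static struct request *pending;			/* stand-in for queued_reqs */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* Try to place a request on the ring; fails (like -ENOSPC) when it is full. */
static int enqueue_locked(struct request *req)
{
	if (ring_used == QUEUE_DEPTH)
		return -1;
	ring[ring_used++] = req;
	printf("submitted request %d\n", req->id);
	return 0;
}

/* Submission path: never sleep or retry here; park the request instead. */
static void submit(struct request *req)
{
	pthread_mutex_lock(&lock);
	if (enqueue_locked(req) < 0) {
		req->next = pending;		/* park it (order not preserved) */
		pending = req;
		pthread_cond_signal(&kick);	/* ~ schedule_delayed_work() */
		printf("queue full, deferring request %d to the worker\n", req->id);
	}
	pthread_mutex_unlock(&lock);
}

/* Worker: pretend the device drained the ring, then retry parked requests. */
static void *dispatch_work(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&kick, &lock);
		for (int i = 0; i < ring_used; i++)
			free(ring[i]);		/* treat them as completed */
		ring_used = 0;
		while (pending) {
			struct request *req = pending;

			pending = req->next;
			if (enqueue_locked(req) < 0) {
				req->next = pending;	/* still full: keep it parked */
				pending = req;
				break;
			}
		}
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, dispatch_work, NULL);
	for (int i = 0; i < 5; i++) {
		struct request *req = calloc(1, sizeof(*req));

		req->id = i;
		submit(req);
	}
	sleep(1);	/* let the worker drain what was parked */
	return 0;
}

Build with cc -pthread. In the driver itself the parked work lives on fsvq->queued_reqs and fsvq->end_reqs, and the retry is kicked with schedule_delayed_work() on fsvq->dispatch_work, as the hunks below show.
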
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
-obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,10 +276,12 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
 	struct fuse_iqueue *fiq = &fc->iq;
-	bool async = req->args->end;
+	bool async;
 
 	if (test_and_set_bit(FR_FINISHED, &req->flags))
 		goto put_request;
+
+	async = req->args->end;
 	/*
 	 * test_and_set_bit() implies smp_mb() between bit
 	 * changing and below intr_entry check. Pairs with
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -405,7 +405,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
 	else
 		fuse_invalidate_entry_cache(entry);
 
-	fuse_advise_use_readdirplus(dir);
+	if (inode)
+		fuse_advise_use_readdirplus(dir);
 	return newent;
 
  out_iput:
@@ -1521,6 +1522,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
 		is_truncate = true;
 	}
 
+	/* Flush dirty data/metadata before non-truncate SETATTR */
+	if (is_wb && S_ISREG(inode->i_mode) &&
+	    attr->ia_valid &
+			(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+			 ATTR_TIMES_SET)) {
+		err = write_inode_now(inode, true);
+		if (err)
+			return err;
+
+		fuse_set_nowrite(inode);
+		fuse_release_nowrite(inode);
+	}
+
 	if (is_truncate) {
 		fuse_set_nowrite(inode);
 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	int err;
-	bool lock_inode = (file->f_flags & O_TRUNC) &&
+	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
 			  fc->atomic_o_trunc &&
 			  fc->writeback_cache;
 
@@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 	if (err)
 		return err;
 
-	if (lock_inode)
+	if (is_wb_truncate) {
 		inode_lock(inode);
+		fuse_set_nowrite(inode);
+	}
 
 	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
 
 	if (!err)
 		fuse_finish_open(inode, file);
 
-	if (lock_inode)
+	if (is_wb_truncate) {
+		fuse_release_nowrite(inode);
 		inode_unlock(inode);
+	}
 
 	return err;
 }
@@ -1997,7 +2001,7 @@ static int fuse_writepages_fill(struct page *page,
 
 	if (!data->ff) {
 		err = -EIO;
-		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+		data->ff = fuse_write_file_get(fc, fi);
 		if (!data->ff)
 			goto out_unlock;
 	}
@@ -2042,8 +2046,6 @@ static int fuse_writepages_fill(struct page *page,
 	 * under writeback, so we can release the page lock.
 	 */
 	if (data->wpa == NULL) {
-		struct fuse_inode *fi = get_fuse_inode(inode);
-
 		err = -ENOMEM;
 		wpa = fuse_writepage_args_alloc();
 		if (!wpa) {
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -479,6 +479,7 @@ struct fuse_fs_context {
 	bool destroy:1;
 	bool no_control:1;
 	bool no_force_umount:1;
+	bool no_mount_options:1;
 	unsigned int max_read;
 	unsigned int blksize;
 	const char *subtype;
@@ -713,6 +714,9 @@ struct fuse_conn {
 	/** Do not allow MNT_FORCE umount */
 	unsigned int no_force_umount:1;
 
+	/* Do not show mount options */
+	unsigned int no_mount_options:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -558,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
 	struct super_block *sb = root->d_sb;
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+	if (fc->no_mount_options)
+		return 0;
+
 	seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
 	seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
 	if (fc->default_permissions)
@@ -1180,6 +1183,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
 	fc->destroy = ctx->destroy;
 	fc->no_control = ctx->no_control;
 	fc->no_force_umount = ctx->no_force_umount;
+	fc->no_mount_options = ctx->no_mount_options;
 
 	err = -ENOMEM;
 	root = fuse_get_root_inode(sb, ctx->rootmode);
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -30,6 +30,7 @@ struct virtio_fs_vq {
 	struct virtqueue *vq;     /* protected by ->lock */
 	struct work_struct done_work;
 	struct list_head queued_reqs;
+	struct list_head end_reqs;	/* End these requests */
 	struct delayed_work dispatch_work;
 	struct fuse_dev *fud;
 	bool connected;
@@ -54,6 +55,9 @@ struct virtio_fs_forget {
 	struct list_head list;
 };
 
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+				 struct fuse_req *req, bool in_flight);
+
 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
 {
 	struct virtio_fs *fs = vq->vdev->priv;
@@ -66,6 +70,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
 	return &vq_to_fsvq(vq)->fud->pq;
 }
 
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+	fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+	WARN_ON(fsvq->in_flight <= 0);
+	fsvq->in_flight--;
+}
+
 static void release_virtio_fs_obj(struct kref *ref)
 {
 	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
@@ -109,22 +126,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
 	flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
-	struct virtio_fs_forget *forget;
-
-	spin_lock(&fsvq->lock);
-	while (1) {
-		forget = list_first_entry_or_null(&fsvq->queued_reqs,
-						struct virtio_fs_forget, list);
-		if (!forget)
-			break;
-		list_del(&forget->list);
-		kfree(forget);
-	}
-	spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
 	struct virtio_fs_vq *fsvq;
@@ -132,9 +133,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 
 	for (i = 0; i < fs->nvqs; i++) {
 		fsvq = &fs->vqs[i];
-		if (i == VQ_HIPRIO)
-			drain_hiprio_queued_reqs(fsvq);
-
 		virtio_fs_drain_queue(fsvq);
 	}
 }
@@ -253,14 +251,66 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 
 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
 			kfree(req);
-			fsvq->in_flight--;
+			dec_in_flight_req(fsvq);
 		}
 	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
 	spin_unlock(&fsvq->lock);
 }
 
-static void virtio_fs_dummy_dispatch_work(struct work_struct *work)
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
 {
+	struct fuse_req *req;
+	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+						 dispatch_work.work);
+	struct fuse_conn *fc = fsvq->fud->fc;
+	int ret;
+
+	pr_debug("virtio-fs: worker %s called.\n", __func__);
+	while (1) {
+		spin_lock(&fsvq->lock);
+		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+					       list);
+		if (!req) {
+			spin_unlock(&fsvq->lock);
+			break;
+		}
+
+		list_del_init(&req->list);
+		spin_unlock(&fsvq->lock);
+		fuse_request_end(fc, req);
+	}
+
+	/* Dispatch pending requests */
+	while (1) {
+		spin_lock(&fsvq->lock);
+		req = list_first_entry_or_null(&fsvq->queued_reqs,
+					       struct fuse_req, list);
+		if (!req) {
+			spin_unlock(&fsvq->lock);
+			return;
+		}
+		list_del_init(&req->list);
+		spin_unlock(&fsvq->lock);
+
+		ret = virtio_fs_enqueue_req(fsvq, req, true);
+		if (ret < 0) {
+			if (ret == -ENOMEM || ret == -ENOSPC) {
+				spin_lock(&fsvq->lock);
+				list_add_tail(&req->list, &fsvq->queued_reqs);
+				schedule_delayed_work(&fsvq->dispatch_work,
+						      msecs_to_jiffies(1));
+				spin_unlock(&fsvq->lock);
+				return;
+			}
+			req->out.h.error = ret;
+			spin_lock(&fsvq->lock);
+			dec_in_flight_req(fsvq);
+			spin_unlock(&fsvq->lock);
+			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+			       ret);
+			fuse_request_end(fc, req);
+		}
+	}
 }
 
 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
@@ -286,6 +336,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 
 		list_del(&forget->list);
 		if (!fsvq->connected) {
+			dec_in_flight_req(fsvq);
 			spin_unlock(&fsvq->lock);
 			kfree(forget);
 			continue;
@@ -307,13 +358,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 			} else {
 				pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
 					 ret);
+				dec_in_flight_req(fsvq);
 				kfree(forget);
 			}
 			spin_unlock(&fsvq->lock);
 			return;
 		}
 
-		fsvq->in_flight++;
 		notify = virtqueue_kick_prepare(vq);
 		spin_unlock(&fsvq->lock);
 
@@ -452,7 +503,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 
 		fuse_request_end(fc, req);
 		spin_lock(&fsvq->lock);
-		fsvq->in_flight--;
+		dec_in_flight_req(fsvq);
 		spin_unlock(&fsvq->lock);
 	}
 }
@@ -502,6 +553,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
 	INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
 	INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+	INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
 	INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
 			virtio_fs_hiprio_dispatch_work);
 	spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
@@ -511,8 +563,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 		spin_lock_init(&fs->vqs[i].lock);
 		INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
 		INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
-					virtio_fs_dummy_dispatch_work);
+				  virtio_fs_request_dispatch_work);
 		INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+		INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
 		snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
 			 "requests.%u", i - VQ_REQUEST);
 		callbacks[i] = virtio_fs_vq_done;
@@ -708,6 +761,7 @@ __releases(fiq->lock)
 			list_add_tail(&forget->list, &fsvq->queued_reqs);
 			schedule_delayed_work(&fsvq->dispatch_work,
 					msecs_to_jiffies(1));
+			inc_in_flight_req(fsvq);
 		} else {
 			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
 				 ret);
@@ -717,7 +771,7 @@ __releases(fiq->lock)
 		goto out;
 	}
 
-	fsvq->in_flight++;
+	inc_in_flight_req(fsvq);
 	notify = virtqueue_kick_prepare(vq);
 
 	spin_unlock(&fsvq->lock);
@@ -819,7 +873,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-				 struct fuse_req *req)
+				 struct fuse_req *req, bool in_flight)
 {
 	/* requests need at least 4 elements */
 	struct scatterlist *stack_sgs[6];
@@ -835,6 +889,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 	unsigned int i;
 	int ret;
 	bool notify;
+	struct fuse_pqueue *fpq;
 
 	/* Does the sglist fit on the stack? */
 	total_sgs = sg_count_fuse_req(req);
@@ -889,7 +944,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 		goto out;
 	}
 
-	fsvq->in_flight++;
+	/* Request successfully sent. */
+	fpq = &fsvq->fud->pq;
+	spin_lock(&fpq->lock);
+	list_add_tail(&req->list, fpq->processing);
+	spin_unlock(&fpq->lock);
+	set_bit(FR_SENT, &req->flags);
+	/* matches barrier in request_wait_answer() */
+	smp_mb__after_atomic();
+
+	if (!in_flight)
+		inc_in_flight_req(fsvq);
 	notify = virtqueue_kick_prepare(vq);
 
 	spin_unlock(&fsvq->lock);
@@ -915,9 +980,8 @@ __releases(fiq->lock)
 {
 	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
 	struct virtio_fs *fs;
-	struct fuse_conn *fc;
 	struct fuse_req *req;
-	struct fuse_pqueue *fpq;
+	struct virtio_fs_vq *fsvq;
 	int ret;
 
 	WARN_ON(list_empty(&fiq->pending));
@@ -928,44 +992,36 @@ __releases(fiq->lock)
 	spin_unlock(&fiq->lock);
 
 	fs = fiq->priv;
-	fc = fs->vqs[queue_id].fud->fc;
 
 	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
 		  __func__, req->in.h.opcode, req->in.h.unique,
 		 req->in.h.nodeid, req->in.h.len,
 		 fuse_len_args(req->args->out_numargs, req->args->out_args));
 
-	fpq = &fs->vqs[queue_id].fud->pq;
-	spin_lock(&fpq->lock);
-	if (!fpq->connected) {
-		spin_unlock(&fpq->lock);
-		req->out.h.error = -ENODEV;
-		pr_err("virtio-fs: %s disconnected\n", __func__);
-		fuse_request_end(fc, req);
-		return;
-	}
-	list_add_tail(&req->list, fpq->processing);
-	spin_unlock(&fpq->lock);
-	set_bit(FR_SENT, &req->flags);
-	/* matches barrier in request_wait_answer() */
-	smp_mb__after_atomic();
-
-retry:
-	ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req);
+	fsvq = &fs->vqs[queue_id];
+	ret = virtio_fs_enqueue_req(fsvq, req, false);
 	if (ret < 0) {
 		if (ret == -ENOMEM || ret == -ENOSPC) {
-			/* Virtqueue full. Retry submission */
-			/* TODO use completion instead of timeout */
-			usleep_range(20, 30);
-			goto retry;
+			/*
+			 * Virtqueue full. Retry submission from worker
+			 * context as we might be holding fc->bg_lock.
+			 */
+			spin_lock(&fsvq->lock);
+			list_add_tail(&req->list, &fsvq->queued_reqs);
+			inc_in_flight_req(fsvq);
+			schedule_delayed_work(&fsvq->dispatch_work,
+					      msecs_to_jiffies(1));
+			spin_unlock(&fsvq->lock);
+			return;
 		}
 		req->out.h.error = ret;
 		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
-		spin_lock(&fpq->lock);
-		clear_bit(FR_SENT, &req->flags);
-		list_del_init(&req->list);
-		spin_unlock(&fpq->lock);
-		fuse_request_end(fc, req);
+
+		/* Can't end request in submission context. Use a worker */
+		spin_lock(&fsvq->lock);
+		list_add_tail(&req->list, &fsvq->end_reqs);
+		schedule_delayed_work(&fsvq->dispatch_work, 0);
+		spin_unlock(&fsvq->lock);
 		return;
 	}
 }
@@ -992,6 +1048,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
 		.destroy = true,
 		.no_control = true,
 		.no_force_umount = true,
+		.no_mount_options = true,
 	};
 
 	mutex_lock(&virtio_fs_mutex);
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -38,6 +38,43 @@
  *
  * Protocol changelog:
  *
+ * 7.1:
+ *  - add the following messages:
+ *      FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ *      FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ *      FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ *      FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ *      FUSE_RELEASEDIR
+ *  - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ *  - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ *  - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ *  - add FUSE_ACCESS message
+ *  - add FUSE_CREATE message
+ *  - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ *  - add frsize to fuse_kstatfs
+ *  - clean up request size limit checking
+ *
+ * 7.5:
+ *  - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ *  - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ *  - add FUSE_INTERRUPT message
+ *  - add POSIX file lock support
+ *
+ * 7.8:
+ *  - add lock_owner and flags fields to fuse_release_in
+ *  - add FUSE_BMAP message
+ *  - add FUSE_DESTROY message
+ *
  * 7.9:
  *  - new fuse_getattr_in input argument of GETATTR
  *  - add lk_flags in fuse_lk_in