Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
(synced 2024-11-24 06:50:58 +07:00)
ceph: include dirty xattrs state in snapped caps

When we snapshot dirty metadata that needs to be written back to the MDS, include dirty xattr metadata. Make the capsnap reference the encoded xattr blob so that it will be written back in the FLUSHSNAP op.

Also fix the capsnap creation guard to include dirty auth or file bits, not just tests specific to dirty file data or file writes in progress (this fixes auth metadata writeback).

Signed-off-by: Sage Weil <sage@newdream.net>
This commit is contained in:
parent: 082afec92d
commit: 4a625be472
@ -1282,7 +1282,7 @@ void __ceph_flush_snaps(struct ceph_inode_info *ci,
|
||||
&capsnap->mtime, &capsnap->atime,
|
||||
capsnap->time_warp_seq,
|
||||
capsnap->uid, capsnap->gid, capsnap->mode,
|
||||
0, NULL,
|
||||
capsnap->xattr_version, capsnap->xattr_blob,
|
||||
capsnap->follows);
|
||||
|
||||
next_follows = capsnap->follows + 1;
|
||||
|
@ -435,7 +435,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
|
||||
{
|
||||
struct inode *inode = &ci->vfs_inode;
|
||||
struct ceph_cap_snap *capsnap;
|
||||
int used;
|
||||
int used, dirty;
|
||||
|
||||
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
|
||||
if (!capsnap) {
|
||||
@ -445,6 +445,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
used = __ceph_caps_used(ci);
|
||||
dirty = __ceph_caps_dirty(ci);
|
||||
if (__ceph_have_pending_cap_snap(ci)) {
|
||||
/* there is no point in queuing multiple "pending" cap_snaps,
|
||||
as no new writes are allowed to start when pending, so any
|
||||
@ -452,11 +453,13 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
|
||||
cap_snap. lucky us. */
|
||||
dout("queue_cap_snap %p already pending\n", inode);
|
||||
kfree(capsnap);
|
||||
} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
|
||||
} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
|
||||
(dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
|
||||
CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
|
||||
struct ceph_snap_context *snapc = ci->i_head_snapc;
|
||||
|
||||
igrab(inode);
|
||||
|
||||
|
||||
atomic_set(&capsnap->nref, 1);
|
||||
capsnap->ci = ci;
|
||||
INIT_LIST_HEAD(&capsnap->ci_item);
|
||||
@ -464,15 +467,21 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
|
||||
|
||||
capsnap->follows = snapc->seq - 1;
|
||||
capsnap->issued = __ceph_caps_issued(ci, NULL);
|
||||
capsnap->dirty = __ceph_caps_dirty(ci);
|
||||
capsnap->dirty = dirty;
|
||||
|
||||
capsnap->mode = inode->i_mode;
|
||||
capsnap->uid = inode->i_uid;
|
||||
capsnap->gid = inode->i_gid;
|
||||
|
||||
/* fixme? */
|
||||
capsnap->xattr_blob = NULL;
|
||||
capsnap->xattr_len = 0;
|
||||
if (dirty & CEPH_CAP_XATTR_EXCL) {
|
||||
__ceph_build_xattrs_blob(ci);
|
||||
capsnap->xattr_blob =
|
||||
ceph_buffer_get(ci->i_xattrs.blob);
|
||||
capsnap->xattr_version = ci->i_xattrs.version;
|
||||
} else {
|
||||
capsnap->xattr_blob = NULL;
|
||||
capsnap->xattr_version = 0;
|
||||
}
|
||||
|
||||
/* dirty page count moved from _head to this cap_snap;
|
||||
all subsequent writes page dirties occur _after_ this
|
||||
|
@ -216,8 +216,7 @@ struct ceph_cap_snap {
|
||||
uid_t uid;
|
||||
gid_t gid;
|
||||
|
||||
void *xattr_blob;
|
||||
int xattr_len;
|
||||
struct ceph_buffer *xattr_blob;
|
||||
u64 xattr_version;
|
||||
|
||||
u64 size;
|
||||
@ -229,8 +228,11 @@ struct ceph_cap_snap {
|
||||
|
||||
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
|
||||
{
|
||||
if (atomic_dec_and_test(&capsnap->nref))
|
||||
if (atomic_dec_and_test(&capsnap->nref)) {
|
||||
if (capsnap->xattr_blob)
|
||||
ceph_buffer_put(capsnap->xattr_blob);
|
||||
kfree(capsnap);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -485,6 +485,7 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
|
||||
ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
|
||||
ci->i_xattrs.prealloc_blob = NULL;
|
||||
ci->i_xattrs.dirty = false;
|
||||
ci->i_xattrs.version++;
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user