Merge branch 'master' of git://oss.sgi.com/xfs/xfs into for-linus
commit fd40261354
@@ -39,6 +39,7 @@ config XFS_QUOTA
config XFS_POSIX_ACL
	bool "XFS POSIX ACL support"
	depends on XFS_FS
	select FS_POSIX_ACL
	help
	  POSIX Access Control Lists (ACLs) support permissions for users and
	  groups beyond the owner/group/world scheme.
@@ -40,7 +40,7 @@ xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
endif

xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o
xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
xfs-$(CONFIG_XFS_POSIX_ACL)	+= $(XFS_LINUX)/xfs_acl.o
xfs-$(CONFIG_PROC_FS)		+= $(XFS_LINUX)/xfs_stats.o
xfs-$(CONFIG_SYSCTL)		+= $(XFS_LINUX)/xfs_sysctl.o
xfs-$(CONFIG_COMPAT)		+= $(XFS_LINUX)/xfs_ioctl32.o
@@ -88,8 +88,7 @@ xfs-y += xfs_alloc.o \
				   xfs_utils.o \
				   xfs_vnodeops.o \
				   xfs_rw.o \
				   xfs_dmops.o \
				   xfs_qmops.o
				   xfs_dmops.o

xfs-$(CONFIG_XFS_TRACE)		+= xfs_btree_trace.o \
				   xfs_dir2_trace.o
fs/xfs/linux-2.6/xfs_acl.c (new file, 523 lines)
@@ -0,0 +1,523 @@
|
||||
/*
|
||||
* Copyright (c) 2008, Christoph Hellwig
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_vnodeops.h"
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
|
||||
|
||||
#define XFS_ACL_NOT_CACHED ((void *)-1)
|
||||
|
||||
/*
|
||||
* Locking scheme:
|
||||
* - all ACL updates are protected by inode->i_mutex, which is taken before
|
||||
* calling into this file.
|
||||
* - access and updates to the ip->i_acl and ip->i_default_acl pointers are
|
||||
* protected by inode->i_lock.
|
||||
*/
|
||||
|
||||
STATIC struct posix_acl *
|
||||
xfs_acl_from_disk(struct xfs_acl *aclp)
|
||||
{
|
||||
struct posix_acl_entry *acl_e;
|
||||
struct posix_acl *acl;
|
||||
struct xfs_acl_entry *ace;
|
||||
int count, i;
|
||||
|
||||
count = be32_to_cpu(aclp->acl_cnt);
|
||||
|
||||
acl = posix_acl_alloc(count, GFP_KERNEL);
|
||||
if (!acl)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
acl_e = &acl->a_entries[i];
|
||||
ace = &aclp->acl_entry[i];
|
||||
|
||||
/*
|
||||
* The tag is 32 bits on disk and 16 bits in core.
|
||||
*
|
||||
* Because every access to it goes through the core
|
||||
* format first this is not a problem.
|
||||
*/
|
||||
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
|
||||
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
|
||||
|
||||
switch (acl_e->e_tag) {
|
||||
case ACL_USER:
|
||||
case ACL_GROUP:
|
||||
acl_e->e_id = be32_to_cpu(ace->ae_id);
|
||||
break;
|
||||
case ACL_USER_OBJ:
|
||||
case ACL_GROUP_OBJ:
|
||||
case ACL_MASK:
|
||||
case ACL_OTHER:
|
||||
acl_e->e_id = ACL_UNDEFINED_ID;
|
||||
break;
|
||||
default:
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
return acl;
|
||||
|
||||
fail:
|
||||
posix_acl_release(acl);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
|
||||
{
|
||||
const struct posix_acl_entry *acl_e;
|
||||
struct xfs_acl_entry *ace;
|
||||
int i;
|
||||
|
||||
aclp->acl_cnt = cpu_to_be32(acl->a_count);
|
||||
for (i = 0; i < acl->a_count; i++) {
|
||||
ace = &aclp->acl_entry[i];
|
||||
acl_e = &acl->a_entries[i];
|
||||
|
||||
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
|
||||
ace->ae_id = cpu_to_be32(acl_e->e_id);
|
||||
ace->ae_perm = cpu_to_be16(acl_e->e_perm);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the cached ACL pointer in the inode.
|
||||
*
|
||||
* Because we don't hold any locks while reading/writing the attribute
|
||||
* from/to disk another thread could have raced and updated the cached
|
||||
* ACL value before us. In that case we release the previous cached value
|
||||
* and update it with our new value.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_update_cached_acl(struct inode *inode, struct posix_acl **p_acl,
|
||||
struct posix_acl *acl)
|
||||
{
|
||||
spin_lock(&inode->i_lock);
|
||||
if (*p_acl && *p_acl != XFS_ACL_NOT_CACHED)
|
||||
posix_acl_release(*p_acl);
|
||||
*p_acl = posix_acl_dup(acl);
|
||||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
|
||||
struct posix_acl *
|
||||
xfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct posix_acl *acl = NULL, **p_acl;
|
||||
struct xfs_acl *xfs_acl;
|
||||
int len = sizeof(struct xfs_acl);
|
||||
char *ea_name;
|
||||
int error;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
ea_name = SGI_ACL_FILE;
|
||||
p_acl = &ip->i_acl;
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
ea_name = SGI_ACL_DEFAULT;
|
||||
p_acl = &ip->i_default_acl;
|
||||
break;
|
||||
default:
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
if (*p_acl != XFS_ACL_NOT_CACHED)
|
||||
acl = posix_acl_dup(*p_acl);
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
/*
 * If we have a cached ACL value just return it, no need to
 * go out to the disk.
 */
|
||||
if (acl)
|
||||
return acl;
|
||||
|
||||
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
|
||||
if (!xfs_acl)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT);
|
||||
if (error) {
|
||||
/*
|
||||
* If the attribute doesn't exist make sure we have a negative
|
||||
* cache entry, for any other error assume it is transient and
|
||||
* leave the cache entry as XFS_ACL_NOT_CACHED.
|
||||
*/
|
||||
if (error == -ENOATTR) {
|
||||
acl = NULL;
|
||||
goto out_update_cache;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
acl = xfs_acl_from_disk(xfs_acl);
|
||||
if (IS_ERR(acl))
|
||||
goto out;
|
||||
|
||||
out_update_cache:
|
||||
xfs_update_cached_acl(inode, p_acl, acl);
|
||||
out:
|
||||
kfree(xfs_acl);
|
||||
return acl;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct posix_acl **p_acl;
|
||||
char *ea_name;
|
||||
int error;
|
||||
|
||||
if (S_ISLNK(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (type) {
|
||||
case ACL_TYPE_ACCESS:
|
||||
ea_name = SGI_ACL_FILE;
|
||||
p_acl = &ip->i_acl;
|
||||
break;
|
||||
case ACL_TYPE_DEFAULT:
|
||||
if (!S_ISDIR(inode->i_mode))
|
||||
return acl ? -EACCES : 0;
|
||||
ea_name = SGI_ACL_DEFAULT;
|
||||
p_acl = &ip->i_default_acl;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (acl) {
|
||||
struct xfs_acl *xfs_acl;
|
||||
int len;
|
||||
|
||||
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
|
||||
if (!xfs_acl)
|
||||
return -ENOMEM;
|
||||
|
||||
xfs_acl_to_disk(xfs_acl, acl);
|
||||
len = sizeof(struct xfs_acl) -
|
||||
(sizeof(struct xfs_acl_entry) *
|
||||
(XFS_ACL_MAX_ENTRIES - acl->a_count));
|
||||
|
||||
error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl,
|
||||
len, ATTR_ROOT);
|
||||
|
||||
kfree(xfs_acl);
|
||||
} else {
|
||||
/*
|
||||
* A NULL ACL argument means we want to remove the ACL.
|
||||
*/
|
||||
error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT);
|
||||
|
||||
/*
|
||||
* If the attribute didn't exist to start with that's fine.
|
||||
*/
|
||||
if (error == -ENOATTR)
|
||||
error = 0;
|
||||
}
|
||||
|
||||
if (!error)
|
||||
xfs_update_cached_acl(inode, p_acl, acl);
|
||||
return error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_check_acl(struct inode *inode, int mask)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct posix_acl *acl;
|
||||
int error = -EAGAIN;
|
||||
|
||||
xfs_itrace_entry(ip);
|
||||
|
||||
/*
|
||||
* If there is no attribute fork no ACL exists on this inode and
|
||||
* we can skip the whole exercise.
|
||||
*/
|
||||
if (!XFS_IFORK_Q(ip))
|
||||
return -EAGAIN;
|
||||
|
||||
acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
if (acl) {
|
||||
error = posix_acl_permission(inode, acl, mask);
|
||||
posix_acl_release(acl);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_set_mode(struct inode *inode, mode_t mode)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
if (mode != inode->i_mode) {
|
||||
struct iattr iattr;
|
||||
|
||||
iattr.ia_valid = ATTR_MODE;
|
||||
iattr.ia_mode = mode;
|
||||
|
||||
error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_acl_exists(struct inode *inode, char *name)
|
||||
{
|
||||
int len = sizeof(struct xfs_acl);
|
||||
|
||||
return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
|
||||
ATTR_ROOT|ATTR_KERNOVAL) == 0);
|
||||
}
|
||||
|
||||
int
|
||||
posix_acl_access_exists(struct inode *inode)
|
||||
{
|
||||
return xfs_acl_exists(inode, SGI_ACL_FILE);
|
||||
}
|
||||
|
||||
int
|
||||
posix_acl_default_exists(struct inode *inode)
|
||||
{
|
||||
if (!S_ISDIR(inode->i_mode))
|
||||
return 0;
|
||||
return xfs_acl_exists(inode, SGI_ACL_DEFAULT);
|
||||
}
|
||||
|
||||
/*
|
||||
* No need for i_mutex because the inode is not yet exposed to the VFS.
|
||||
*/
|
||||
int
|
||||
xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl)
|
||||
{
|
||||
struct posix_acl *clone;
|
||||
mode_t mode;
|
||||
int error = 0, inherit = 0;
|
||||
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
clone = posix_acl_clone(default_acl, GFP_KERNEL);
|
||||
if (!clone)
|
||||
return -ENOMEM;
|
||||
|
||||
mode = inode->i_mode;
|
||||
error = posix_acl_create_masq(clone, &mode);
|
||||
if (error < 0)
|
||||
goto out_release_clone;
|
||||
|
||||
/*
|
||||
* If posix_acl_create_masq returns a positive value we need to
|
||||
* inherit a permission that can't be represented using the Unix
|
||||
* mode bits and we actually need to set an ACL.
|
||||
*/
|
||||
if (error > 0)
|
||||
inherit = 1;
|
||||
|
||||
error = xfs_set_mode(inode, mode);
|
||||
if (error)
|
||||
goto out_release_clone;
|
||||
|
||||
if (inherit)
|
||||
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
|
||||
|
||||
out_release_clone:
|
||||
posix_acl_release(clone);
|
||||
return error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_chmod(struct inode *inode)
|
||||
{
|
||||
struct posix_acl *acl, *clone;
|
||||
int error;
|
||||
|
||||
if (S_ISLNK(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
|
||||
if (IS_ERR(acl) || !acl)
|
||||
return PTR_ERR(acl);
|
||||
|
||||
clone = posix_acl_clone(acl, GFP_KERNEL);
|
||||
posix_acl_release(acl);
|
||||
if (!clone)
|
||||
return -ENOMEM;
|
||||
|
||||
error = posix_acl_chmod_masq(clone, inode->i_mode);
|
||||
if (!error)
|
||||
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
|
||||
|
||||
posix_acl_release(clone);
|
||||
return error;
|
||||
}
|
||||
|
||||
void
|
||||
xfs_inode_init_acls(struct xfs_inode *ip)
|
||||
{
|
||||
/*
|
||||
* No need for locking, inode is not live yet.
|
||||
*/
|
||||
ip->i_acl = XFS_ACL_NOT_CACHED;
|
||||
ip->i_default_acl = XFS_ACL_NOT_CACHED;
|
||||
}
|
||||
|
||||
void
|
||||
xfs_inode_clear_acls(struct xfs_inode *ip)
|
||||
{
|
||||
/*
|
||||
* No need for locking here, the inode is not live anymore
|
||||
* and just about to be freed.
|
||||
*/
|
||||
if (ip->i_acl != XFS_ACL_NOT_CACHED)
|
||||
posix_acl_release(ip->i_acl);
|
||||
if (ip->i_default_acl != XFS_ACL_NOT_CACHED)
|
||||
posix_acl_release(ip->i_default_acl);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* System xattr handlers.
|
||||
*
|
||||
* Currently Posix ACLs are the only system namespace extended attribute
|
||||
* handlers supported by XFS, so we just implement the handlers here.
|
||||
* If we ever support other system extended attributes this will need
|
||||
* some refactoring.
|
||||
*/
|
||||
|
||||
static int
|
||||
xfs_decode_acl(const char *name)
|
||||
{
|
||||
if (strcmp(name, "posix_acl_access") == 0)
|
||||
return ACL_TYPE_ACCESS;
|
||||
else if (strcmp(name, "posix_acl_default") == 0)
|
||||
return ACL_TYPE_DEFAULT;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_xattr_system_get(struct inode *inode, const char *name,
|
||||
void *value, size_t size)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
int type, error;
|
||||
|
||||
type = xfs_decode_acl(name);
|
||||
if (type < 0)
|
||||
return type;
|
||||
|
||||
acl = xfs_get_acl(inode, type);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
if (acl == NULL)
|
||||
return -ENODATA;
|
||||
|
||||
error = posix_acl_to_xattr(acl, value, size);
|
||||
posix_acl_release(acl);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_xattr_system_set(struct inode *inode, const char *name,
|
||||
const void *value, size_t size, int flags)
|
||||
{
|
||||
struct posix_acl *acl = NULL;
|
||||
int error = 0, type;
|
||||
|
||||
type = xfs_decode_acl(name);
|
||||
if (type < 0)
|
||||
return type;
|
||||
if (flags & XATTR_CREATE)
|
||||
return -EINVAL;
|
||||
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
|
||||
return value ? -EACCES : 0;
|
||||
if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
|
||||
return -EPERM;
|
||||
|
||||
if (!value)
|
||||
goto set_acl;
|
||||
|
||||
acl = posix_acl_from_xattr(value, size);
|
||||
if (!acl) {
|
||||
/*
|
||||
* acl_set_file(3) may request that we set default ACLs with
|
||||
* zero length -- defend (gracefully) against that here.
|
||||
*/
|
||||
goto out;
|
||||
}
|
||||
if (IS_ERR(acl)) {
|
||||
error = PTR_ERR(acl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = posix_acl_valid(acl);
|
||||
if (error)
|
||||
goto out_release;
|
||||
|
||||
error = -EINVAL;
|
||||
if (acl->a_count > XFS_ACL_MAX_ENTRIES)
|
||||
goto out_release;
|
||||
|
||||
if (type == ACL_TYPE_ACCESS) {
|
||||
mode_t mode = inode->i_mode;
|
||||
error = posix_acl_equiv_mode(acl, &mode);
|
||||
|
||||
if (error <= 0) {
|
||||
posix_acl_release(acl);
|
||||
acl = NULL;
|
||||
|
||||
if (error < 0)
|
||||
return error;
|
||||
}
|
||||
|
||||
error = xfs_set_mode(inode, mode);
|
||||
if (error)
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
set_acl:
|
||||
error = xfs_set_acl(inode, type, acl);
|
||||
out_release:
|
||||
posix_acl_release(acl);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
struct xattr_handler xfs_xattr_system_handler = {
|
||||
.prefix = XATTR_SYSTEM_PREFIX,
|
||||
.get = xfs_xattr_system_get,
|
||||
.set = xfs_xattr_system_set,
|
||||
};
|
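The system-namespace xattr handler added above (xfs_xattr_system_handler) exposes these ACLs to userspace as the standard system.posix_acl_access and system.posix_acl_default attributes. Below is a minimal userspace sketch of that surface, not part of the commit itself; the mount point and file name are hypothetical, and only the ordinary C library xattr wrappers are assumed.

/* getacl.c - probe the access ACL of a file through the system.* xattr
 * namespace served by the new handler.  Build with: cc -o getacl getacl.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	/* hypothetical default path for illustration only */
	const char *path = argc > 1 ? argv[1] : "/mnt/xfs/testfile";
	ssize_t len;
	char *buf;

	/* A NULL buffer asks only for the attribute size. */
	len = getxattr(path, "system.posix_acl_access", NULL, 0);
	if (len < 0) {
		perror("getxattr");	/* ENODATA: no access ACL is set */
		return 1;
	}

	buf = malloc(len);
	if (!buf)
		return 1;
	len = getxattr(path, "system.posix_acl_access", buf, len);
	if (len < 0) {
		perror("getxattr");
		free(buf);
		return 1;
	}

	printf("%s: %zd bytes of posix_acl_xattr data\n", path, len);
	free(buf);
	return 0;
}

A size probe that fails with ENODATA corresponds to xfs_xattr_system_get() returning -ENODATA when xfs_get_acl() finds neither a cached nor an on-disk ACL.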
@ -41,7 +41,6 @@
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_buf_item.h"
|
||||
@ -899,7 +898,8 @@ xfs_ioctl_setattr(
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_trans *tp;
|
||||
unsigned int lock_flags = 0;
|
||||
struct xfs_dquot *udqp = NULL, *gdqp = NULL;
|
||||
struct xfs_dquot *udqp = NULL;
|
||||
struct xfs_dquot *gdqp = NULL;
|
||||
struct xfs_dquot *olddquot = NULL;
|
||||
int code;
|
||||
|
||||
@ -919,7 +919,7 @@ xfs_ioctl_setattr(
|
||||
* because the i_*dquot fields will get updated anyway.
|
||||
*/
|
||||
if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
|
||||
code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid,
|
||||
code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
|
||||
ip->i_d.di_gid, fa->fsx_projid,
|
||||
XFS_QMOPT_PQUOTA, &udqp, &gdqp);
|
||||
if (code)
|
||||
@ -954,10 +954,11 @@ xfs_ioctl_setattr(
|
||||
* Do a quota reservation only if projid is actually going to change.
|
||||
*/
|
||||
if (mask & FSX_PROJID) {
|
||||
if (XFS_IS_PQUOTA_ON(mp) &&
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) &&
|
||||
XFS_IS_PQUOTA_ON(mp) &&
|
||||
ip->i_d.di_projid != fa->fsx_projid) {
|
||||
ASSERT(tp);
|
||||
code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
|
||||
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
|
||||
capable(CAP_FOWNER) ?
|
||||
XFS_QMOPT_FORCE_RES : 0);
|
||||
if (code) /* out of quota */
|
||||
@ -1059,8 +1060,8 @@ xfs_ioctl_setattr(
|
||||
* in the transaction.
|
||||
*/
|
||||
if (ip->i_d.di_projid != fa->fsx_projid) {
|
||||
if (XFS_IS_PQUOTA_ON(mp)) {
|
||||
olddquot = XFS_QM_DQVOPCHOWN(mp, tp, ip,
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
|
||||
olddquot = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_gdquot, gdqp);
|
||||
}
|
||||
ip->i_d.di_projid = fa->fsx_projid;
|
||||
@ -1106,9 +1107,9 @@ xfs_ioctl_setattr(
|
||||
/*
|
||||
* Release any dquot(s) the inode had kept before chown.
|
||||
*/
|
||||
XFS_QM_DQRELE(mp, olddquot);
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
xfs_qm_dqrele(olddquot);
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
|
||||
if (code)
|
||||
return code;
|
||||
@ -1122,8 +1123,8 @@ xfs_ioctl_setattr(
|
||||
return 0;
|
||||
|
||||
error_return:
|
||||
XFS_QM_DQRELE(mp, udqp);
|
||||
XFS_QM_DQRELE(mp, gdqp);
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
xfs_trans_cancel(tp, 0);
|
||||
if (lock_flags)
|
||||
xfs_iunlock(ip, lock_flags);
|
||||
|
@ -17,6 +17,7 @@
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_bit.h"
|
||||
#include "xfs_log.h"
|
||||
#include "xfs_inum.h"
|
||||
@ -51,6 +52,7 @@
|
||||
#include <linux/capability.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/posix_acl.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/falloc.h>
|
||||
#include <linux/fiemap.h>
|
||||
@ -202,9 +204,8 @@ xfs_vn_mknod(
|
||||
{
|
||||
struct inode *inode;
|
||||
struct xfs_inode *ip = NULL;
|
||||
xfs_acl_t *default_acl = NULL;
|
||||
struct posix_acl *default_acl = NULL;
|
||||
struct xfs_name name;
|
||||
int (*test_default_acl)(struct inode *) = _ACL_DEFAULT_EXISTS;
|
||||
int error;
|
||||
|
||||
/*
|
||||
@ -219,18 +220,14 @@ xfs_vn_mknod(
|
||||
rdev = 0;
|
||||
}
|
||||
|
||||
if (test_default_acl && test_default_acl(dir)) {
|
||||
if (!_ACL_ALLOC(default_acl)) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (!_ACL_GET_DEFAULT(dir, default_acl)) {
|
||||
_ACL_FREE(default_acl);
|
||||
default_acl = NULL;
|
||||
}
|
||||
}
|
||||
if (IS_POSIXACL(dir)) {
|
||||
default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
|
||||
if (IS_ERR(default_acl))
|
||||
return -PTR_ERR(default_acl);
|
||||
|
||||
if (IS_POSIXACL(dir) && !default_acl)
|
||||
mode &= ~current_umask();
|
||||
if (!default_acl)
|
||||
mode &= ~current_umask();
|
||||
}
|
||||
|
||||
xfs_dentry_to_name(&name, dentry);
|
||||
error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
|
||||
@ -244,10 +241,10 @@ xfs_vn_mknod(
|
||||
goto out_cleanup_inode;
|
||||
|
||||
if (default_acl) {
|
||||
error = _ACL_INHERIT(inode, mode, default_acl);
|
||||
error = -xfs_inherit_acl(inode, default_acl);
|
||||
if (unlikely(error))
|
||||
goto out_cleanup_inode;
|
||||
_ACL_FREE(default_acl);
|
||||
posix_acl_release(default_acl);
|
||||
}
|
||||
|
||||
|
||||
@ -257,8 +254,7 @@ xfs_vn_mknod(
|
||||
out_cleanup_inode:
|
||||
xfs_cleanup_inode(dir, inode, dentry);
|
||||
out_free_acl:
|
||||
if (default_acl)
|
||||
_ACL_FREE(default_acl);
|
||||
posix_acl_release(default_acl);
|
||||
return -error;
|
||||
}
|
||||
|
||||
@ -488,26 +484,6 @@ xfs_vn_put_link(
|
||||
kfree(s);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
STATIC int
|
||||
xfs_check_acl(
|
||||
struct inode *inode,
|
||||
int mask)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
int error;
|
||||
|
||||
xfs_itrace_entry(ip);
|
||||
|
||||
if (XFS_IFORK_Q(ip)) {
|
||||
error = xfs_acl_iaccess(ip, mask, NULL);
|
||||
if (error != -1)
|
||||
return -error;
|
||||
}
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_vn_permission(
|
||||
struct inode *inode,
|
||||
@ -515,9 +491,6 @@ xfs_vn_permission(
|
||||
{
|
||||
return generic_permission(inode, mask, xfs_check_acl);
|
||||
}
|
||||
#else
|
||||
#define xfs_vn_permission NULL
|
||||
#endif
|
||||
|
||||
STATIC int
|
||||
xfs_vn_getattr(
|
||||
|
@ -42,7 +42,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_buf_item.h"
|
||||
|
@ -50,9 +50,11 @@ xfs_fs_quota_sync(
|
||||
{
|
||||
struct xfs_mount *mp = XFS_M(sb);
|
||||
|
||||
if (sb->s_flags & MS_RDONLY)
|
||||
return -EROFS;
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp))
|
||||
return -ENOSYS;
|
||||
return -xfs_sync_inodes(mp, SYNC_DELWRI);
|
||||
return -xfs_sync_data(mp, 0);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
|
@ -43,7 +43,6 @@
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_fsops.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_utils.h"
|
||||
@ -405,6 +404,14 @@ xfs_parseargs(
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_XFS_QUOTA
|
||||
if (XFS_IS_QUOTA_RUNNING(mp)) {
|
||||
cmn_err(CE_WARN,
|
||||
"XFS: quota support not available in this kernel.");
|
||||
return EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
|
||||
(mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
|
||||
cmn_err(CE_WARN,
|
||||
@ -1063,7 +1070,18 @@ xfs_fs_put_super(
|
||||
int unmount_event_flags = 0;
|
||||
|
||||
xfs_syncd_stop(mp);
|
||||
xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI);
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
/*
 * XXX(hch): this should be SYNC_WAIT.
 *
 * Or more likely not needed at all because the VFS is already
 * calling ->sync_fs after shutting down all filesystem
 * operations and just before calling ->put_super.
 */
|
||||
xfs_sync_data(mp, 0);
|
||||
xfs_sync_attr(mp, 0);
|
||||
}
|
||||
|
||||
#ifdef HAVE_DMAPI
|
||||
if (mp->m_flags & XFS_MOUNT_DMAPI) {
|
||||
@ -1098,7 +1116,6 @@ xfs_fs_put_super(
|
||||
xfs_freesb(mp);
|
||||
xfs_icsb_destroy_counters(mp);
|
||||
xfs_close_devices(mp);
|
||||
xfs_qmops_put(mp);
|
||||
xfs_dmops_put(mp);
|
||||
xfs_free_fsname(mp);
|
||||
kfree(mp);
|
||||
@ -1158,6 +1175,7 @@ xfs_fs_statfs(
|
||||
{
|
||||
struct xfs_mount *mp = XFS_M(dentry->d_sb);
|
||||
xfs_sb_t *sbp = &mp->m_sb;
|
||||
struct xfs_inode *ip = XFS_I(dentry->d_inode);
|
||||
__uint64_t fakeinos, id;
|
||||
xfs_extlen_t lsize;
|
||||
|
||||
@ -1186,7 +1204,10 @@ xfs_fs_statfs(
|
||||
statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
|
||||
spin_unlock(&mp->m_sb_lock);
|
||||
|
||||
XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp);
|
||||
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
|
||||
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
|
||||
(XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
|
||||
xfs_qm_statvfs(ip, statp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1394,16 +1415,13 @@ xfs_fs_fill_super(
|
||||
error = xfs_dmops_get(mp);
|
||||
if (error)
|
||||
goto out_free_fsname;
|
||||
error = xfs_qmops_get(mp);
|
||||
if (error)
|
||||
goto out_put_dmops;
|
||||
|
||||
if (silent)
|
||||
flags |= XFS_MFSI_QUIET;
|
||||
|
||||
error = xfs_open_devices(mp);
|
||||
if (error)
|
||||
goto out_put_qmops;
|
||||
goto out_put_dmops;
|
||||
|
||||
if (xfs_icsb_init_counters(mp))
|
||||
mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
|
||||
@ -1471,8 +1489,6 @@ xfs_fs_fill_super(
|
||||
out_destroy_counters:
|
||||
xfs_icsb_destroy_counters(mp);
|
||||
xfs_close_devices(mp);
|
||||
out_put_qmops:
|
||||
xfs_qmops_put(mp);
|
||||
out_put_dmops:
|
||||
xfs_dmops_put(mp);
|
||||
out_free_fsname:
|
||||
@ -1706,18 +1722,8 @@ xfs_init_zones(void)
|
||||
if (!xfs_ili_zone)
|
||||
goto out_destroy_inode_zone;
|
||||
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
xfs_acl_zone = kmem_zone_init(sizeof(xfs_acl_t), "xfs_acl");
|
||||
if (!xfs_acl_zone)
|
||||
goto out_destroy_ili_zone;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
out_destroy_ili_zone:
|
||||
#endif
|
||||
kmem_zone_destroy(xfs_ili_zone);
|
||||
out_destroy_inode_zone:
|
||||
kmem_zone_destroy(xfs_inode_zone);
|
||||
out_destroy_efi_zone:
|
||||
@ -1751,9 +1757,6 @@ xfs_init_zones(void)
|
||||
STATIC void
|
||||
xfs_destroy_zones(void)
|
||||
{
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
kmem_zone_destroy(xfs_acl_zone);
|
||||
#endif
|
||||
kmem_zone_destroy(xfs_ili_zone);
|
||||
kmem_zone_destroy(xfs_inode_zone);
|
||||
kmem_zone_destroy(xfs_efi_zone);
|
||||
|
@ -43,168 +43,269 @@
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_quota.h"
|
||||
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
/*
|
||||
* Sync all the inodes in the given AG according to the
|
||||
* direction given by the flags.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_sync_inodes_ag(
|
||||
xfs_mount_t *mp,
|
||||
int ag,
|
||||
int flags)
|
||||
|
||||
STATIC xfs_inode_t *
|
||||
xfs_inode_ag_lookup(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_perag *pag,
|
||||
uint32_t *first_index,
|
||||
int tag)
|
||||
{
|
||||
xfs_perag_t *pag = &mp->m_perag[ag];
|
||||
int nr_found;
|
||||
uint32_t first_index = 0;
|
||||
int error = 0;
|
||||
int last_error = 0;
|
||||
int nr_found;
|
||||
struct xfs_inode *ip;
|
||||
|
||||
do {
|
||||
struct inode *inode;
|
||||
xfs_inode_t *ip = NULL;
|
||||
int lock_flags = XFS_ILOCK_SHARED;
|
||||
|
||||
/*
|
||||
* use a gang lookup to find the next inode in the tree
|
||||
* as the tree is sparse and a gang lookup walks to find
|
||||
* the number of objects requested.
|
||||
*/
|
||||
read_lock(&pag->pag_ici_lock);
|
||||
/*
|
||||
* use a gang lookup to find the next inode in the tree
|
||||
* as the tree is sparse and a gang lookup walks to find
|
||||
* the number of objects requested.
|
||||
*/
|
||||
read_lock(&pag->pag_ici_lock);
|
||||
if (tag == XFS_ICI_NO_TAG) {
|
||||
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
|
||||
(void**)&ip, first_index, 1);
|
||||
(void **)&ip, *first_index, 1);
|
||||
} else {
|
||||
nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
|
||||
(void **)&ip, *first_index, 1, tag);
|
||||
}
|
||||
if (!nr_found)
|
||||
goto unlock;
|
||||
|
||||
if (!nr_found) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
/*
|
||||
* Update the index for the next lookup. Catch overflows
|
||||
* into the next AG range which can occur if we have inodes
|
||||
* in the last block of the AG and we are currently
|
||||
* pointing to the last inode.
|
||||
*/
|
||||
*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||
if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
|
||||
goto unlock;
|
||||
|
||||
return ip;
|
||||
|
||||
unlock:
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_inode_ag_walk(
|
||||
struct xfs_mount *mp,
|
||||
xfs_agnumber_t ag,
|
||||
int (*execute)(struct xfs_inode *ip,
|
||||
struct xfs_perag *pag, int flags),
|
||||
int flags,
|
||||
int tag)
|
||||
{
|
||||
struct xfs_perag *pag = &mp->m_perag[ag];
|
||||
uint32_t first_index;
|
||||
int last_error = 0;
|
||||
int skipped;
|
||||
|
||||
restart:
|
||||
skipped = 0;
|
||||
first_index = 0;
|
||||
do {
|
||||
int error = 0;
|
||||
xfs_inode_t *ip;
|
||||
|
||||
ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
|
||||
if (!ip)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the index for the next lookup. Catch overflows
|
||||
* into the next AG range which can occur if we have inodes
|
||||
* in the last block of the AG and we are currently
|
||||
* pointing to the last inode.
|
||||
*/
|
||||
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/* nothing to sync during shutdown */
|
||||
if (XFS_FORCED_SHUTDOWN(mp)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we can't get a reference on the inode, it must be
|
||||
* in reclaim. Leave it for the reclaim code to flush.
|
||||
*/
|
||||
inode = VFS_I(ip);
|
||||
if (!igrab(inode)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
error = execute(ip, pag, flags);
|
||||
if (error == EAGAIN) {
|
||||
skipped++;
|
||||
continue;
|
||||
}
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
|
||||
/* avoid new or bad inodes */
|
||||
if (is_bad_inode(inode) ||
|
||||
xfs_iflags_test(ip, XFS_INEW)) {
|
||||
IRELE(ip);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we have to flush data or wait for I/O completion
|
||||
* we need to hold the iolock.
|
||||
*/
|
||||
if (flags & SYNC_DELWRI) {
|
||||
if (VN_DIRTY(inode)) {
|
||||
if (flags & SYNC_TRYLOCK) {
|
||||
if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
|
||||
lock_flags |= XFS_IOLOCK_SHARED;
|
||||
} else {
|
||||
xfs_ilock(ip, XFS_IOLOCK_SHARED);
|
||||
lock_flags |= XFS_IOLOCK_SHARED;
|
||||
}
|
||||
if (lock_flags & XFS_IOLOCK_SHARED) {
|
||||
error = xfs_flush_pages(ip, 0, -1,
|
||||
(flags & SYNC_WAIT) ? 0
|
||||
: XFS_B_ASYNC,
|
||||
FI_NONE);
|
||||
}
|
||||
}
|
||||
if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
|
||||
xfs_ioend_wait(ip);
|
||||
}
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
|
||||
if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
|
||||
if (flags & SYNC_WAIT) {
|
||||
xfs_iflock(ip);
|
||||
if (!xfs_inode_clean(ip))
|
||||
error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
|
||||
else
|
||||
xfs_ifunlock(ip);
|
||||
} else if (xfs_iflock_nowait(ip)) {
|
||||
if (!xfs_inode_clean(ip))
|
||||
error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
|
||||
else
|
||||
xfs_ifunlock(ip);
|
||||
}
|
||||
}
|
||||
xfs_iput(ip, lock_flags);
|
||||
|
||||
if (error)
|
||||
last_error = error;
|
||||
/*
|
||||
* bail out if the filesystem is corrupted.
|
||||
*/
|
||||
if (error == EFSCORRUPTED)
|
||||
return XFS_ERROR(error);
|
||||
break;
|
||||
|
||||
} while (nr_found);
|
||||
} while (1);
|
||||
|
||||
if (skipped) {
|
||||
delay(1);
|
||||
goto restart;
|
||||
}
|
||||
|
||||
xfs_put_perag(mp, pag);
|
||||
return last_error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_sync_inodes(
|
||||
xfs_mount_t *mp,
|
||||
int flags)
|
||||
xfs_inode_ag_iterator(
|
||||
struct xfs_mount *mp,
|
||||
int (*execute)(struct xfs_inode *ip,
|
||||
struct xfs_perag *pag, int flags),
|
||||
int flags,
|
||||
int tag)
|
||||
{
|
||||
int error;
|
||||
int last_error;
|
||||
int i;
|
||||
int lflags = XFS_LOG_FORCE;
|
||||
int error = 0;
|
||||
int last_error = 0;
|
||||
xfs_agnumber_t ag;
|
||||
|
||||
if (mp->m_flags & XFS_MOUNT_RDONLY)
|
||||
return 0;
|
||||
error = 0;
|
||||
last_error = 0;
|
||||
|
||||
if (flags & SYNC_WAIT)
|
||||
lflags |= XFS_LOG_SYNC;
|
||||
|
||||
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
|
||||
if (!mp->m_perag[i].pag_ici_init)
|
||||
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
|
||||
if (!mp->m_perag[ag].pag_ici_init)
|
||||
continue;
|
||||
error = xfs_sync_inodes_ag(mp, i, flags);
|
||||
if (error)
|
||||
error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
|
||||
if (error) {
|
||||
last_error = error;
|
||||
if (error == EFSCORRUPTED)
|
||||
break;
|
||||
if (error == EFSCORRUPTED)
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (flags & SYNC_DELWRI)
|
||||
xfs_log_force(mp, 0, lflags);
|
||||
|
||||
return XFS_ERROR(last_error);
|
||||
}
|
||||
|
||||
/* must be called with pag_ici_lock held and releases it */
|
||||
int
|
||||
xfs_sync_inode_valid(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_perag *pag)
|
||||
{
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
/* nothing to sync during shutdown */
|
||||
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
return EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we can't get a reference on the inode, it must be in reclaim.
|
||||
* Leave it for the reclaim code to flush. Also avoid inodes that
|
||||
* haven't been fully initialised.
|
||||
*/
|
||||
if (!igrab(inode)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
return ENOENT;
|
||||
}
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
|
||||
if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
|
||||
IRELE(ip);
|
||||
return ENOENT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_sync_inode_data(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_perag *pag,
|
||||
int flags)
|
||||
{
|
||||
struct inode *inode = VFS_I(ip);
|
||||
struct address_space *mapping = inode->i_mapping;
|
||||
int error = 0;
|
||||
|
||||
error = xfs_sync_inode_valid(ip, pag);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
|
||||
goto out_wait;
|
||||
|
||||
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
|
||||
if (flags & SYNC_TRYLOCK)
|
||||
goto out_wait;
|
||||
xfs_ilock(ip, XFS_IOLOCK_SHARED);
|
||||
}
|
||||
|
||||
error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
|
||||
0 : XFS_B_ASYNC, FI_NONE);
|
||||
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
|
||||
|
||||
out_wait:
|
||||
if (flags & SYNC_WAIT)
|
||||
xfs_ioend_wait(ip);
|
||||
IRELE(ip);
|
||||
return error;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_sync_inode_attr(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_perag *pag,
|
||||
int flags)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
error = xfs_sync_inode_valid(ip, pag);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
if (xfs_inode_clean(ip))
|
||||
goto out_unlock;
|
||||
if (!xfs_iflock_nowait(ip)) {
|
||||
if (!(flags & SYNC_WAIT))
|
||||
goto out_unlock;
|
||||
xfs_iflock(ip);
|
||||
}
|
||||
|
||||
if (xfs_inode_clean(ip)) {
|
||||
xfs_ifunlock(ip);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
|
||||
XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
IRELE(ip);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write out pagecache data for the whole filesystem.
|
||||
*/
|
||||
int
|
||||
xfs_sync_data(
|
||||
struct xfs_mount *mp,
|
||||
int flags)
|
||||
{
|
||||
int error;
|
||||
|
||||
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
|
||||
|
||||
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
|
||||
XFS_ICI_NO_TAG);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
xfs_log_force(mp, 0,
|
||||
(flags & SYNC_WAIT) ?
|
||||
XFS_LOG_FORCE | XFS_LOG_SYNC :
|
||||
XFS_LOG_FORCE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write out inode metadata (attributes) for the whole filesystem.
|
||||
*/
|
||||
int
|
||||
xfs_sync_attr(
|
||||
struct xfs_mount *mp,
|
||||
int flags)
|
||||
{
|
||||
ASSERT((flags & ~SYNC_WAIT) == 0);
|
||||
|
||||
return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
|
||||
XFS_ICI_NO_TAG);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_commit_dummy_trans(
|
||||
struct xfs_mount *mp,
|
||||
@ -252,7 +353,7 @@ xfs_sync_fsdata(
|
||||
* If this is xfssyncd() then only sync the superblock if we can
|
||||
* lock it without sleeping and it is not pinned.
|
||||
*/
|
||||
if (flags & SYNC_BDFLUSH) {
|
||||
if (flags & SYNC_TRYLOCK) {
|
||||
ASSERT(!(flags & SYNC_WAIT));
|
||||
|
||||
bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
|
||||
@ -316,13 +417,13 @@ xfs_quiesce_data(
|
||||
int error;
|
||||
|
||||
/* push non-blocking */
|
||||
xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
|
||||
XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
|
||||
xfs_sync_data(mp, 0);
|
||||
xfs_qm_sync(mp, SYNC_TRYLOCK);
|
||||
xfs_filestream_flush(mp);
|
||||
|
||||
/* push and block */
|
||||
xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
|
||||
XFS_QM_DQSYNC(mp, SYNC_WAIT);
|
||||
xfs_sync_data(mp, SYNC_WAIT);
|
||||
xfs_qm_sync(mp, SYNC_WAIT);
|
||||
|
||||
/* write superblock and hoover up shutdown errors */
|
||||
error = xfs_sync_fsdata(mp, 0);
|
||||
@ -341,7 +442,7 @@ xfs_quiesce_fs(
|
||||
int count = 0, pincount;
|
||||
|
||||
xfs_flush_buftarg(mp->m_ddev_targp, 0);
|
||||
xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
||||
xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
||||
|
||||
/*
|
||||
* This loop must run at least twice. The first instance of the loop
|
||||
@ -350,7 +451,7 @@ xfs_quiesce_fs(
|
||||
* logged before we can write the unmount record.
|
||||
*/
|
||||
do {
|
||||
xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
|
||||
xfs_sync_attr(mp, SYNC_WAIT);
|
||||
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
|
||||
if (!pincount) {
|
||||
delay(50);
|
||||
@ -433,8 +534,8 @@ xfs_flush_inodes_work(
|
||||
void *arg)
|
||||
{
|
||||
struct inode *inode = arg;
|
||||
xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
|
||||
xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
|
||||
xfs_sync_data(mp, SYNC_TRYLOCK);
|
||||
xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
|
||||
iput(inode);
|
||||
}
|
||||
|
||||
@ -465,10 +566,10 @@ xfs_sync_worker(
|
||||
|
||||
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
|
||||
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
|
||||
xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
||||
xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
|
||||
/* dgc: errors ignored here */
|
||||
error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
|
||||
error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
|
||||
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
|
||||
error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
|
||||
if (xfs_log_need_covered(mp))
|
||||
error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
|
||||
}
|
||||
@ -569,7 +670,7 @@ xfs_reclaim_inode(
|
||||
xfs_ifunlock(ip);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
}
|
||||
return 1;
|
||||
return -EAGAIN;
|
||||
}
|
||||
__xfs_iflags_set(ip, XFS_IRECLAIM);
|
||||
spin_unlock(&ip->i_flags_lock);
|
||||
@ -654,101 +755,27 @@ xfs_inode_clear_reclaim_tag(
|
||||
xfs_put_perag(mp, pag);
|
||||
}
|
||||
|
||||
|
||||
STATIC void
|
||||
xfs_reclaim_inodes_ag(
|
||||
xfs_mount_t *mp,
|
||||
int ag,
|
||||
int noblock,
|
||||
int mode)
|
||||
STATIC int
|
||||
xfs_reclaim_inode_now(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_perag *pag,
|
||||
int flags)
|
||||
{
|
||||
xfs_inode_t *ip = NULL;
|
||||
xfs_perag_t *pag = &mp->m_perag[ag];
|
||||
int nr_found;
|
||||
uint32_t first_index;
|
||||
int skipped;
|
||||
|
||||
restart:
|
||||
first_index = 0;
|
||||
skipped = 0;
|
||||
do {
|
||||
/*
|
||||
* use a gang lookup to find the next inode in the tree
|
||||
* as the tree is sparse and a gang lookup walks to find
|
||||
* the number of objects requested.
|
||||
*/
|
||||
read_lock(&pag->pag_ici_lock);
|
||||
nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
|
||||
(void**)&ip, first_index, 1,
|
||||
XFS_ICI_RECLAIM_TAG);
|
||||
|
||||
if (!nr_found) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the index for the next lookup. Catch overflows
|
||||
* into the next AG range which can occur if we have inodes
|
||||
* in the last block of the AG and we are currently
|
||||
* pointing to the last inode.
|
||||
*/
|
||||
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/* ignore if already under reclaim */
|
||||
if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (noblock) {
|
||||
if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
continue;
|
||||
}
|
||||
if (xfs_ipincount(ip) ||
|
||||
!xfs_iflock_nowait(ip)) {
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
/* ignore if already under reclaim */
|
||||
if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
|
||||
/*
|
||||
* hmmm - this is an inode already in reclaim. Do
|
||||
* we even bother catching it here?
|
||||
*/
|
||||
if (xfs_reclaim_inode(ip, noblock, mode))
|
||||
skipped++;
|
||||
} while (nr_found);
|
||||
|
||||
if (skipped) {
|
||||
delay(1);
|
||||
goto restart;
|
||||
return 0;
|
||||
}
|
||||
return;
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
|
||||
return xfs_reclaim_inode(ip, 0, flags);
|
||||
}
|
||||
|
||||
int
|
||||
xfs_reclaim_inodes(
|
||||
xfs_mount_t *mp,
|
||||
int noblock,
|
||||
int mode)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
|
||||
if (!mp->m_perag[i].pag_ici_init)
|
||||
continue;
|
||||
xfs_reclaim_inodes_ag(mp, i, noblock, mode);
|
||||
}
|
||||
return 0;
|
||||
return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
|
||||
XFS_ICI_RECLAIM_TAG);
|
||||
}
|
||||
|
||||
|
||||
|
@ -29,17 +29,14 @@ typedef struct xfs_sync_work {
|
||||
struct completion *w_completion;
|
||||
} xfs_sync_work_t;
|
||||
|
||||
#define SYNC_ATTR 0x0001 /* sync attributes */
|
||||
#define SYNC_DELWRI 0x0002 /* look at delayed writes */
|
||||
#define SYNC_WAIT 0x0004 /* wait for i/o to complete */
|
||||
#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */
|
||||
#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */
|
||||
#define SYNC_TRYLOCK 0x0020 /* only try to lock inodes */
|
||||
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
|
||||
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
|
||||
|
||||
int xfs_syncd_init(struct xfs_mount *mp);
|
||||
void xfs_syncd_stop(struct xfs_mount *mp);
|
||||
|
||||
int xfs_sync_inodes(struct xfs_mount *mp, int flags);
|
||||
int xfs_sync_attr(struct xfs_mount *mp, int flags);
|
||||
int xfs_sync_data(struct xfs_mount *mp, int flags);
|
||||
int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
|
||||
|
||||
int xfs_quiesce_data(struct xfs_mount *mp);
|
||||
@ -48,10 +45,16 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
|
||||
void xfs_flush_inodes(struct xfs_inode *ip);
|
||||
|
||||
int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
|
||||
int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
|
||||
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
|
||||
|
||||
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
|
||||
void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
|
||||
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
|
||||
struct xfs_inode *ip);
|
||||
|
||||
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
|
||||
int xfs_inode_ag_iterator(struct xfs_mount *mp,
|
||||
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
|
||||
int flags, int tag);
|
||||
|
||||
#endif
|
||||
|
@ -29,67 +29,6 @@
|
||||
#include <linux/xattr.h>
|
||||
|
||||
|
||||
/*
|
||||
* ACL handling. Should eventually be moved into xfs_acl.c
|
||||
*/
|
||||
|
||||
static int
|
||||
xfs_decode_acl(const char *name)
|
||||
{
|
||||
if (strcmp(name, "posix_acl_access") == 0)
|
||||
return _ACL_TYPE_ACCESS;
|
||||
else if (strcmp(name, "posix_acl_default") == 0)
|
||||
return _ACL_TYPE_DEFAULT;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get system extended attributes which at the moment only
|
||||
* includes Posix ACLs.
|
||||
*/
|
||||
static int
|
||||
xfs_xattr_system_get(struct inode *inode, const char *name,
|
||||
void *buffer, size_t size)
|
||||
{
|
||||
int acl;
|
||||
|
||||
acl = xfs_decode_acl(name);
|
||||
if (acl < 0)
|
||||
return acl;
|
||||
|
||||
return xfs_acl_vget(inode, buffer, size, acl);
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_xattr_system_set(struct inode *inode, const char *name,
|
||||
const void *value, size_t size, int flags)
|
||||
{
|
||||
int acl;
|
||||
|
||||
acl = xfs_decode_acl(name);
|
||||
if (acl < 0)
|
||||
return acl;
|
||||
if (flags & XATTR_CREATE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!value)
|
||||
return xfs_acl_vremove(inode, acl);
|
||||
|
||||
return xfs_acl_vset(inode, (void *)value, size, acl);
|
||||
}
|
||||
|
||||
static struct xattr_handler xfs_xattr_system_handler = {
|
||||
.prefix = XATTR_SYSTEM_PREFIX,
|
||||
.get = xfs_xattr_system_get,
|
||||
.set = xfs_xattr_system_set,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Real xattr handling. The only difference between the namespaces is
|
||||
* a flag passed to the low-level attr code.
|
||||
*/
|
||||
|
||||
static int
|
||||
__xfs_xattr_get(struct inode *inode, const char *name,
|
||||
void *value, size_t size, int xflags)
|
||||
@ -199,7 +138,9 @@ struct xattr_handler *xfs_xattr_handlers[] = {
|
||||
&xfs_xattr_user_handler,
|
||||
&xfs_xattr_trusted_handler,
|
||||
&xfs_xattr_security_handler,
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
&xfs_xattr_system_handler,
|
||||
#endif
|
||||
NULL
|
||||
};
|
||||
|
||||
@ -310,7 +251,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
|
||||
/*
|
||||
* Then add the two synthetic ACL attributes.
|
||||
*/
|
||||
if (xfs_acl_vhasacl_access(inode)) {
|
||||
if (posix_acl_access_exists(inode)) {
|
||||
error = list_one_attr(POSIX_ACL_XATTR_ACCESS,
|
||||
strlen(POSIX_ACL_XATTR_ACCESS) + 1,
|
||||
data, size, &context.count);
|
||||
@ -318,7 +259,7 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
|
||||
return error;
|
||||
}
|
||||
|
||||
if (xfs_acl_vhasacl_default(inode)) {
|
||||
if (posix_acl_default_exists(inode)) {
|
||||
error = list_one_attr(POSIX_ACL_XATTR_DEFAULT,
|
||||
strlen(POSIX_ACL_XATTR_DEFAULT) + 1,
|
||||
data, size, &context.count);
|
||||
|
@ -42,7 +42,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
@ -1194,7 +1193,9 @@ void
|
||||
xfs_qm_dqrele(
|
||||
xfs_dquot_t *dqp)
|
||||
{
|
||||
ASSERT(dqp);
|
||||
if (!dqp)
|
||||
return;
|
||||
|
||||
xfs_dqtrace_entry(dqp, "DQRELE");
|
||||
|
||||
xfs_dqlock(dqp);
|
||||
|
@ -181,7 +181,6 @@ extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
|
||||
extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
|
||||
xfs_dqid_t, uint, uint, xfs_dquot_t **);
|
||||
extern void xfs_qm_dqput(xfs_dquot_t *);
|
||||
extern void xfs_qm_dqrele(xfs_dquot_t *);
|
||||
extern void xfs_dqlock(xfs_dquot_t *);
|
||||
extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
|
||||
extern void xfs_dqunlock(xfs_dquot_t *);
|
||||
|
@ -42,7 +42,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
|
@ -42,7 +42,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
@ -287,11 +286,13 @@ xfs_qm_rele_quotafs_ref(
|
||||
* Just destroy the quotainfo structure.
|
||||
*/
|
||||
void
|
||||
xfs_qm_unmount_quotadestroy(
|
||||
xfs_mount_t *mp)
|
||||
xfs_qm_unmount(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (mp->m_quotainfo)
|
||||
if (mp->m_quotainfo) {
|
||||
xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
|
||||
xfs_qm_destroy_quotainfo(mp);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -385,8 +386,13 @@ xfs_qm_mount_quotas(
|
||||
if (error) {
|
||||
xfs_fs_cmn_err(CE_WARN, mp,
|
||||
"Failed to initialize disk quotas.");
|
||||
return;
|
||||
}
|
||||
return;
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
if (XFS_IS_QUOTA_ON(mp))
|
||||
xfs_qm_internalqcheck(mp);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
@ -774,12 +780,11 @@ xfs_qm_dqattach_grouphint(
|
||||
* Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
|
||||
* into account.
|
||||
* If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
|
||||
* If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
|
||||
* Inode may get unlocked and relocked in here, and the caller must deal with
|
||||
* the consequences.
|
||||
*/
|
||||
int
|
||||
xfs_qm_dqattach(
|
||||
xfs_qm_dqattach_locked(
|
||||
xfs_inode_t *ip,
|
||||
uint flags)
|
||||
{
|
||||
@ -787,17 +792,14 @@ xfs_qm_dqattach(
|
||||
uint nquotas = 0;
|
||||
int error = 0;
|
||||
|
||||
if ((! XFS_IS_QUOTA_ON(mp)) ||
|
||||
(! XFS_NOT_DQATTACHED(mp, ip)) ||
|
||||
(ip->i_ino == mp->m_sb.sb_uquotino) ||
|
||||
(ip->i_ino == mp->m_sb.sb_gquotino))
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp) ||
|
||||
!XFS_IS_QUOTA_ON(mp) ||
|
||||
!XFS_NOT_DQATTACHED(mp, ip) ||
|
||||
ip->i_ino == mp->m_sb.sb_uquotino ||
|
||||
ip->i_ino == mp->m_sb.sb_gquotino)
|
||||
return 0;
|
||||
|
||||
ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
|
||||
xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
if (! (flags & XFS_QMOPT_ILOCKED))
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
if (XFS_IS_UQUOTA_ON(mp)) {
|
||||
error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
|
||||
@ -849,8 +851,7 @@ xfs_qm_dqattach(
|
||||
xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
|
||||
}
|
||||
|
||||
done:
|
||||
|
||||
done:
|
||||
#ifdef QUOTADEBUG
|
||||
if (! error) {
|
||||
if (XFS_IS_UQUOTA_ON(mp))
|
||||
@ -858,15 +859,22 @@ xfs_qm_dqattach(
|
||||
if (XFS_IS_OQUOTA_ON(mp))
|
||||
ASSERT(ip->i_gdquot);
|
||||
}
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
#endif
|
||||
return error;
|
||||
}
|
||||
|
||||
if (! (flags & XFS_QMOPT_ILOCKED))
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
int
|
||||
xfs_qm_dqattach(
|
||||
struct xfs_inode *ip,
|
||||
uint flags)
|
||||
{
|
||||
int error;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
error = xfs_qm_dqattach_locked(ip, flags);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
#ifdef QUOTADEBUG
|
||||
else
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
#endif
|
||||
return error;
|
||||
}
|
||||
|
||||
@ -896,11 +904,6 @@ xfs_qm_dqdetach(
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called to sync quotas. We can be told to use non-blocking
|
||||
* semantics by either the SYNC_BDFLUSH flag or the absence of the
|
||||
* SYNC_WAIT flag.
|
||||
*/
|
||||
int
|
||||
xfs_qm_sync(
|
||||
xfs_mount_t *mp,
|
||||
@ -909,17 +912,13 @@ xfs_qm_sync(
|
||||
int recl, restarts;
|
||||
xfs_dquot_t *dqp;
|
||||
uint flush_flags;
|
||||
boolean_t nowait;
|
||||
int error;
|
||||
|
||||
if (! XFS_IS_QUOTA_ON(mp))
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
||||
return 0;
|
||||
|
||||
flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
|
||||
restarts = 0;
|
||||
/*
|
||||
* We won't block unless we are asked to.
|
||||
*/
|
||||
nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
|
||||
|
||||
again:
|
||||
xfs_qm_mplist_lock(mp);
|
||||
@ -939,18 +938,10 @@ xfs_qm_sync(
|
||||
* don't 'seem' to be dirty. ie. don't acquire dqlock.
|
||||
* This is very similar to what xfs_sync does with inodes.
|
||||
*/
|
||||
if (flags & SYNC_BDFLUSH) {
|
||||
if (! XFS_DQ_IS_DIRTY(dqp))
|
||||
if (flags & SYNC_TRYLOCK) {
|
||||
if (!XFS_DQ_IS_DIRTY(dqp))
|
||||
continue;
|
||||
}
|
||||
|
||||
if (nowait) {
|
||||
/*
|
||||
* Try to acquire the dquot lock. We are NOT out of
|
||||
* lock order, but we just don't want to wait for this
|
||||
* lock, unless somebody wanted us to.
|
||||
*/
|
||||
if (! xfs_qm_dqlock_nowait(dqp))
|
||||
if (!xfs_qm_dqlock_nowait(dqp))
|
||||
continue;
|
||||
} else {
|
||||
xfs_dqlock(dqp);
|
||||
@ -967,7 +958,7 @@ xfs_qm_sync(
|
||||
/* XXX a sentinel would be better */
|
||||
recl = XFS_QI_MPLRECLAIMS(mp);
|
||||
if (!xfs_dqflock_nowait(dqp)) {
|
||||
if (nowait) {
|
||||
if (flags & SYNC_TRYLOCK) {
|
||||
xfs_dqunlock(dqp);
|
||||
continue;
|
||||
}
|
||||
@ -985,7 +976,6 @@ xfs_qm_sync(
|
||||
* Let go of the mplist lock. We don't want to hold it
|
||||
* across a disk write
|
||||
*/
|
||||
flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
|
||||
xfs_qm_mplist_unlock(mp);
|
||||
xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
|
||||
error = xfs_qm_dqflush(dqp, flush_flags);
|
||||
@ -2319,20 +2309,20 @@ xfs_qm_write_sb_changes(
|
||||
*/
|
||||
int
|
||||
xfs_qm_vop_dqalloc(
|
||||
xfs_mount_t *mp,
|
||||
xfs_inode_t *ip,
|
||||
uid_t uid,
|
||||
gid_t gid,
|
||||
prid_t prid,
uint flags,
xfs_dquot_t **O_udqpp,
xfs_dquot_t **O_gdqpp)
struct xfs_inode *ip,
uid_t uid,
gid_t gid,
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
struct xfs_dquot **O_gdqpp)
{
int error;
xfs_dquot_t *uq, *gq;
uint lockflags;
struct xfs_mount *mp = ip->i_mount;
struct xfs_dquot *uq, *gq;
int error;
uint lockflags;

if (!XFS_IS_QUOTA_ON(mp))
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;

lockflags = XFS_ILOCK_EXCL;
@ -2346,8 +2336,8 @@ xfs_qm_vop_dqalloc(
* if necessary. The dquot(s) will not be locked.
*/
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
XFS_QMOPT_ILOCKED))) {
error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
if (error) {
xfs_iunlock(ip, lockflags);
return error;
}
@ -2469,6 +2459,7 @@ xfs_qm_vop_chown(
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

@ -2508,13 +2499,13 @@ xfs_qm_vop_chown_reserve(
xfs_dquot_t *gdqp,
uint flags)
{
int error;
xfs_mount_t *mp;
xfs_mount_t *mp = ip->i_mount;
uint delblks, blkflags, prjflags = 0;
xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
int error;

ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
mp = ip->i_mount;
ASSERT(XFS_IS_QUOTA_RUNNING(mp));

delblks = ip->i_delayed_blks;
@ -2582,28 +2573,23 @@ xfs_qm_vop_chown_reserve(

int
xfs_qm_vop_rename_dqattach(
xfs_inode_t **i_tab)
struct xfs_inode **i_tab)
{
xfs_inode_t *ip;
int i;
int error;
struct xfs_mount *mp = i_tab[0]->i_mount;
int i;

ip = i_tab[0];

if (! XFS_IS_QUOTA_ON(ip->i_mount))
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;

if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
}
for (i = 1; (i < 4 && i_tab[i]); i++) {
for (i = 0; (i < 4 && i_tab[i]); i++) {
struct xfs_inode *ip = i_tab[i];
int error;

/*
* Watch out for duplicate entries in the table.
*/
if ((ip = i_tab[i]) != i_tab[i-1]) {
if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
if (i == 0 || ip != i_tab[i-1]) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
@ -2614,17 +2600,19 @@ xfs_qm_vop_rename_dqattach(
}

void
xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_trans_t *tp,
xfs_inode_t *ip,
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp)
xfs_qm_vop_create_dqattach(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp)
{
if (!XFS_IS_QUOTA_ON(tp->t_mountp))
struct xfs_mount *mp = tp->t_mountp;

if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return;

ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
ASSERT(XFS_IS_QUOTA_RUNNING(mp));

if (udqp) {
xfs_dqlock(udqp);
@ -2632,7 +2620,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_dqunlock(udqp);
ASSERT(ip->i_udquot == NULL);
ip->i_udquot = udqp;
ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
@ -2642,8 +2630,8 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_dqunlock(gdqp);
ASSERT(ip->i_gdquot == NULL);
ip->i_gdquot = gdqp;
ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
ASSERT(XFS_IS_OQUOTA_ON(mp));
ASSERT((XFS_IS_GQUOTA_ON(mp) ?
ip->i_d.di_gid : ip->i_d.di_projid) ==
be32_to_cpu(gdqp->q_core.d_id));
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
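Nearly every hunk in this file swaps the old single check, if (!XFS_IS_QUOTA_ON(mp)), for the combined guard if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) before touching any dquots. A minimal standalone sketch of that early-return pattern follows; the struct and flag bits below are illustrative stand-ins, not the kernel's real definitions.

/* Standalone model of the quota guard used throughout this patch.
 * The flag bits and struct are illustrative; in the kernel the checks
 * are the XFS_IS_QUOTA_RUNNING()/XFS_IS_QUOTA_ON() macros on the mount. */
#include <stdio.h>

#define QUOTA_RUNNING (1u << 0) /* quota accounting active on this mount */
#define QUOTA_ON      (1u << 1) /* quota state fully set up */

struct mount { unsigned int qflags; };

/* Mirrors the early return: the helper becomes a no-op unless both
 * "running" and "on" hold, instead of checking only "on". */
static int quota_op(const struct mount *mp)
{
	if (!(mp->qflags & QUOTA_RUNNING) || !(mp->qflags & QUOTA_ON))
		return 0; /* nothing to account */
	return 1; /* real dquot work would happen here */
}

int main(void)
{
	struct mount off = { 0 };
	struct mount on = { QUOTA_RUNNING | QUOTA_ON };

	printf("quota off: %d, quota on: %d\n", quota_op(&off), quota_op(&on));
	return 0;
}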
@ -127,8 +127,6 @@ typedef struct xfs_quotainfo {
} xfs_quotainfo_t;

extern xfs_dqtrxops_t xfs_trans_dquot_ops;

extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
@ -159,17 +157,11 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_RTBWARNLIMIT 5

extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern void xfs_qm_mount_quotas(xfs_mount_t *);
extern int xfs_qm_quotacheck(xfs_mount_t *);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
extern void xfs_qm_unmount_quotas(xfs_mount_t *);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_sync(xfs_mount_t *, int);

/* dquot stuff */
extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
extern int xfs_qm_dqattach(xfs_inode_t *, uint);
extern void xfs_qm_dqdetach(xfs_inode_t *);
extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);

@ -183,19 +175,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);

/* vop stuff */
extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
uid_t, gid_t, prid_t, uint,
xfs_dquot_t **, xfs_dquot_t **);
extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *);
extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **);
extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t **, xfs_dquot_t *);
extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *, uint);

/* list stuff */
extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
@ -42,7 +42,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"
@ -84,7 +83,7 @@ xfs_fill_statvfs_from_dquot(
* return a statvfs of the project, not the entire filesystem.
* This makes such trees appear as if they are filesystems in themselves.
*/
STATIC void
void
xfs_qm_statvfs(
xfs_inode_t *ip,
struct kstatfs *statp)
@ -92,20 +91,13 @@ xfs_qm_statvfs(
xfs_mount_t *mp = ip->i_mount;
xfs_dquot_t *dqp;

if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
!((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
return;

if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) {
xfs_disk_dquot_t *dp = &dqp->q_core;

xfs_fill_statvfs_from_dquot(statp, dp);
xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
xfs_qm_dqput(dqp);
}
}

STATIC int
int
xfs_qm_newmount(
xfs_mount_t *mp,
uint *needquotamount,
@ -114,9 +106,6 @@ xfs_qm_newmount(
uint quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;

*quotaflags = 0;
*needquotamount = B_FALSE;

quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);

@ -179,66 +168,6 @@ xfs_qm_newmount(
return 0;
}

STATIC int
xfs_qm_endmount(
xfs_mount_t *mp,
uint needquotamount,
uint quotaflags)
{
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
xfs_qm_mount_quotas(mp);
}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
if (! (XFS_IS_QUOTA_ON(mp)))
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
else
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
#endif

#ifdef QUOTADEBUG
if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
#endif

return 0;
}

STATIC void
xfs_qm_dqrele_null(
xfs_dquot_t *dq)
{
/*
* Called from XFS, where we always check first for a NULL dquot.
*/
if (!dq)
return;
xfs_qm_dqrele(dq);
}

struct xfs_qmops xfs_qmcore_xfs = {
.xfs_qminit = xfs_qm_newmount,
.xfs_qmdone = xfs_qm_unmount_quotadestroy,
.xfs_qmmount = xfs_qm_endmount,
.xfs_qmunmount = xfs_qm_unmount_quotas,
.xfs_dqrele = xfs_qm_dqrele_null,
.xfs_dqattach = xfs_qm_dqattach,
.xfs_dqdetach = xfs_qm_dqdetach,
.xfs_dqpurgeall = xfs_qm_dqpurge_all,
.xfs_dqvopalloc = xfs_qm_vop_dqalloc,
.xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode,
.xfs_dqvoprename = xfs_qm_vop_rename_dqattach,
.xfs_dqvopchown = xfs_qm_vop_chown,
.xfs_dqvopchownresv = xfs_qm_vop_chown_reserve,
.xfs_dqstatvfs = xfs_qm_statvfs,
.xfs_dqsync = xfs_qm_sync,
.xfs_dqtrxops = &xfs_trans_dquot_ops,
};
EXPORT_SYMBOL(xfs_qmcore_xfs);

void __init
xfs_qm_init(void)
{
@ -42,7 +42,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"

@ -45,7 +45,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@ -847,105 +846,55 @@ xfs_qm_export_flags(
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Release all the dquots on the inodes in an AG.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_qm_dqrele_inodes_ag(
|
||||
xfs_mount_t *mp,
|
||||
int ag,
|
||||
uint flags)
|
||||
STATIC int
|
||||
xfs_dqrele_inode(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_perag *pag,
|
||||
int flags)
|
||||
{
|
||||
xfs_inode_t *ip = NULL;
|
||||
xfs_perag_t *pag = &mp->m_perag[ag];
|
||||
int first_index = 0;
|
||||
int nr_found;
|
||||
int error;
|
||||
|
||||
do {
|
||||
/*
|
||||
* use a gang lookup to find the next inode in the tree
|
||||
* as the tree is sparse and a gang lookup walks to find
|
||||
* the number of objects requested.
|
||||
*/
|
||||
read_lock(&pag->pag_ici_lock);
|
||||
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
|
||||
(void**)&ip, first_index, 1);
|
||||
|
||||
if (!nr_found) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the index for the next lookup. Catch overflows
|
||||
* into the next AG range which can occur if we have inodes
|
||||
* in the last block of the AG and we are currently
|
||||
* pointing to the last inode.
|
||||
*/
|
||||
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/* skip quota inodes */
|
||||
if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
|
||||
ASSERT(ip->i_udquot == NULL);
|
||||
ASSERT(ip->i_gdquot == NULL);
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we can't get a reference on the inode, it must be
|
||||
* in reclaim. Leave it for the reclaim code to flush.
|
||||
*/
|
||||
if (!igrab(VFS_I(ip))) {
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
continue;
|
||||
}
|
||||
/* skip quota inodes */
|
||||
if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
|
||||
ASSERT(ip->i_udquot == NULL);
|
||||
ASSERT(ip->i_gdquot == NULL);
|
||||
read_unlock(&pag->pag_ici_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* avoid new inodes though we shouldn't find any here */
|
||||
if (xfs_iflags_test(ip, XFS_INEW)) {
|
||||
IRELE(ip);
|
||||
continue;
|
||||
}
|
||||
error = xfs_sync_inode_valid(ip, pag);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
|
||||
xfs_qm_dqrele(ip->i_udquot);
|
||||
ip->i_udquot = NULL;
|
||||
}
|
||||
if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) &&
|
||||
ip->i_gdquot) {
|
||||
xfs_qm_dqrele(ip->i_gdquot);
|
||||
ip->i_gdquot = NULL;
|
||||
}
|
||||
xfs_iput(ip, XFS_ILOCK_EXCL);
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
|
||||
xfs_qm_dqrele(ip->i_udquot);
|
||||
ip->i_udquot = NULL;
|
||||
}
|
||||
if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
|
||||
xfs_qm_dqrele(ip->i_gdquot);
|
||||
ip->i_gdquot = NULL;
|
||||
}
|
||||
xfs_iput(ip, XFS_ILOCK_EXCL);
|
||||
IRELE(ip);
|
||||
|
||||
} while (nr_found);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Go thru all the inodes in the file system, releasing their dquots.
|
||||
*
|
||||
* Note that the mount structure gets modified to indicate that quotas are off
|
||||
* AFTER this, in the case of quotaoff. This also gets called from
|
||||
* xfs_rootumount.
|
||||
* AFTER this, in the case of quotaoff.
|
||||
*/
|
||||
void
|
||||
xfs_qm_dqrele_all_inodes(
|
||||
struct xfs_mount *mp,
|
||||
uint flags)
|
||||
{
|
||||
int i;
|
||||
|
||||
ASSERT(mp->m_quotainfo);
|
||||
for (i = 0; i < mp->m_sb.sb_agcount; i++) {
|
||||
if (!mp->m_perag[i].pag_ici_init)
|
||||
continue;
|
||||
xfs_qm_dqrele_inodes_ag(mp, i, flags);
|
||||
}
|
||||
xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
|
||||
}
|
||||
|
||||
/*------------------------------------------------------------------------*/
|
||||
|
@ -42,7 +42,6 @@
|
||||
#include "xfs_rtalloc.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
@ -111,7 +110,7 @@ xfs_trans_log_dquot(
|
||||
* Carry forward whatever is left of the quota blk reservation to
|
||||
* the spanky new transaction
|
||||
*/
|
||||
STATIC void
|
||||
void
|
||||
xfs_trans_dup_dqinfo(
|
||||
xfs_trans_t *otp,
|
||||
xfs_trans_t *ntp)
|
||||
@ -167,19 +166,17 @@ xfs_trans_dup_dqinfo(
|
||||
/*
|
||||
* Wrap around mod_dquot to account for both user and group quotas.
|
||||
*/
|
||||
STATIC void
|
||||
void
|
||||
xfs_trans_mod_dquot_byino(
|
||||
xfs_trans_t *tp,
|
||||
xfs_inode_t *ip,
|
||||
uint field,
|
||||
long delta)
|
||||
{
|
||||
xfs_mount_t *mp;
|
||||
xfs_mount_t *mp = tp->t_mountp;
|
||||
|
||||
ASSERT(tp);
|
||||
mp = tp->t_mountp;
|
||||
|
||||
if (!XFS_IS_QUOTA_ON(mp) ||
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp) ||
|
||||
!XFS_IS_QUOTA_ON(mp) ||
|
||||
ip->i_ino == mp->m_sb.sb_uquotino ||
|
||||
ip->i_ino == mp->m_sb.sb_gquotino)
|
||||
return;
|
||||
@ -229,6 +226,7 @@ xfs_trans_mod_dquot(
|
||||
xfs_dqtrx_t *qtrx;
|
||||
|
||||
ASSERT(tp);
|
||||
ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
|
||||
qtrx = NULL;
|
||||
|
||||
if (tp->t_dqinfo == NULL)
|
||||
@ -346,7 +344,7 @@ xfs_trans_dqlockedjoin(
|
||||
* Unreserve just the reservations done by this transaction.
|
||||
* dquot is still left locked at exit.
|
||||
*/
|
||||
STATIC void
|
||||
void
|
||||
xfs_trans_apply_dquot_deltas(
|
||||
xfs_trans_t *tp)
|
||||
{
|
||||
@ -357,7 +355,7 @@ xfs_trans_apply_dquot_deltas(
|
||||
long totalbdelta;
|
||||
long totalrtbdelta;
|
||||
|
||||
if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY))
|
||||
if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
|
||||
return;
|
||||
|
||||
ASSERT(tp->t_dqinfo);
|
||||
@ -531,7 +529,7 @@ xfs_trans_apply_dquot_deltas(
|
||||
* we simply throw those away, since that's the expected behavior
|
||||
* when a transaction is curtailed without a commit.
|
||||
*/
|
||||
STATIC void
|
||||
void
|
||||
xfs_trans_unreserve_and_mod_dquots(
|
||||
xfs_trans_t *tp)
|
||||
{
|
||||
@ -768,7 +766,7 @@ xfs_trans_reserve_quota_bydquots(
|
||||
{
|
||||
int resvd = 0, error;
|
||||
|
||||
if (!XFS_IS_QUOTA_ON(mp))
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
||||
return 0;
|
||||
|
||||
if (tp && tp->t_dqinfo == NULL)
|
||||
@ -811,18 +809,17 @@ xfs_trans_reserve_quota_bydquots(
|
||||
* This doesn't change the actual usage, just the reservation.
|
||||
* The inode sent in is locked.
|
||||
*/
|
||||
STATIC int
|
||||
int
|
||||
xfs_trans_reserve_quota_nblks(
|
||||
xfs_trans_t *tp,
|
||||
xfs_mount_t *mp,
|
||||
xfs_inode_t *ip,
|
||||
long nblks,
|
||||
long ninos,
|
||||
uint flags)
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
long nblks,
|
||||
long ninos,
|
||||
uint flags)
|
||||
{
|
||||
int error;
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
|
||||
if (!XFS_IS_QUOTA_ON(mp))
|
||||
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
|
||||
return 0;
|
||||
if (XFS_IS_PQUOTA_ON(mp))
|
||||
flags |= XFS_QMOPT_ENOSPC;
|
||||
@ -831,7 +828,6 @@ xfs_trans_reserve_quota_nblks(
|
||||
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
|
||||
ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
|
||||
XFS_TRANS_DQ_RES_RTBLKS ||
|
||||
(flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
|
||||
@ -840,11 +836,9 @@ xfs_trans_reserve_quota_nblks(
|
||||
/*
|
||||
* Reserve nblks against these dquots, with trans as the mediator.
|
||||
*/
|
||||
error = xfs_trans_reserve_quota_bydquots(tp, mp,
|
||||
ip->i_udquot, ip->i_gdquot,
|
||||
nblks, ninos,
|
||||
flags);
|
||||
return error;
|
||||
return xfs_trans_reserve_quota_bydquots(tp, mp,
|
||||
ip->i_udquot, ip->i_gdquot,
|
||||
nblks, ninos, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -895,25 +889,15 @@ STATIC void
|
||||
xfs_trans_alloc_dqinfo(
|
||||
xfs_trans_t *tp)
|
||||
{
|
||||
(tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
|
||||
tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
void
|
||||
xfs_trans_free_dqinfo(
|
||||
xfs_trans_t *tp)
|
||||
{
|
||||
if (!tp->t_dqinfo)
|
||||
return;
|
||||
kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo);
|
||||
(tp)->t_dqinfo = NULL;
|
||||
kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
|
||||
tp->t_dqinfo = NULL;
|
||||
}
|
||||
|
||||
xfs_dqtrxops_t xfs_trans_dquot_ops = {
|
||||
.qo_dup_dqinfo = xfs_trans_dup_dqinfo,
|
||||
.qo_free_dqinfo = xfs_trans_free_dqinfo,
|
||||
.qo_mod_dquot_byino = xfs_trans_mod_dquot_byino,
|
||||
.qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas,
|
||||
.qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks,
|
||||
.qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots,
|
||||
.qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots,
|
||||
};
|
||||
|
874
fs/xfs/xfs_acl.c
@ -1,874 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_types.h"
|
||||
#include "xfs_bit.h"
|
||||
#include "xfs_inum.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_alloc_btree.h"
|
||||
#include "xfs_ialloc_btree.h"
|
||||
#include "xfs_dir2_sf.h"
|
||||
#include "xfs_attr_sf.h"
|
||||
#include "xfs_dinode.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_vnodeops.h"
|
||||
|
||||
#include <linux/capability.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
|
||||
STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *);
|
||||
STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *);
|
||||
STATIC void xfs_acl_get_endian(xfs_acl_t *);
|
||||
STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
|
||||
STATIC int xfs_acl_invalid(xfs_acl_t *);
|
||||
STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *);
|
||||
STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *);
|
||||
STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *);
|
||||
STATIC int xfs_acl_allow_set(struct inode *, int);
|
||||
|
||||
kmem_zone_t *xfs_acl_zone;
|
||||
|
||||
|
||||
/*
|
||||
* Test for existence of access ACL attribute as efficiently as possible.
|
||||
*/
|
||||
int
|
||||
xfs_acl_vhasacl_access(
|
||||
struct inode *vp)
|
||||
{
|
||||
int error;
|
||||
|
||||
xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error);
|
||||
return (error == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test for existence of default ACL attribute as efficiently as possible.
|
||||
*/
|
||||
int
|
||||
xfs_acl_vhasacl_default(
|
||||
struct inode *vp)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (!S_ISDIR(vp->i_mode))
|
||||
return 0;
|
||||
xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
|
||||
return (error == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert from extended attribute representation to in-memory for XFS.
|
||||
*/
|
||||
STATIC int
|
||||
posix_acl_xattr_to_xfs(
|
||||
posix_acl_xattr_header *src,
|
||||
size_t size,
|
||||
xfs_acl_t *dest)
|
||||
{
|
||||
posix_acl_xattr_entry *src_entry;
|
||||
xfs_acl_entry_t *dest_entry;
|
||||
int n;
|
||||
|
||||
if (!src || !dest)
|
||||
return EINVAL;
|
||||
|
||||
if (size < sizeof(posix_acl_xattr_header))
|
||||
return EINVAL;
|
||||
|
||||
if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
|
||||
return EOPNOTSUPP;
|
||||
|
||||
memset(dest, 0, sizeof(xfs_acl_t));
|
||||
dest->acl_cnt = posix_acl_xattr_count(size);
|
||||
if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES)
|
||||
return EINVAL;
|
||||
|
||||
/*
|
||||
* acl_set_file(3) may request that we set default ACLs with
|
||||
* zero length -- defend (gracefully) against that here.
|
||||
*/
|
||||
if (!dest->acl_cnt)
|
||||
return 0;
|
||||
|
||||
src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src));
|
||||
dest_entry = &dest->acl_entry[0];
|
||||
|
||||
for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) {
|
||||
dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm);
|
||||
if (_ACL_PERM_INVALID(dest_entry->ae_perm))
|
||||
return EINVAL;
|
||||
dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag);
|
||||
switch(dest_entry->ae_tag) {
|
||||
case ACL_USER:
|
||||
case ACL_GROUP:
|
||||
dest_entry->ae_id = le32_to_cpu(src_entry->e_id);
|
||||
break;
|
||||
case ACL_USER_OBJ:
|
||||
case ACL_GROUP_OBJ:
|
||||
case ACL_MASK:
|
||||
case ACL_OTHER:
|
||||
dest_entry->ae_id = ACL_UNDEFINED_ID;
|
||||
break;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
}
|
||||
if (xfs_acl_invalid(dest))
|
||||
return EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Comparison function called from xfs_sort().
|
||||
* Primary key is ae_tag, secondary key is ae_id.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_acl_entry_compare(
|
||||
const void *va,
|
||||
const void *vb)
|
||||
{
|
||||
xfs_acl_entry_t *a = (xfs_acl_entry_t *)va,
|
||||
*b = (xfs_acl_entry_t *)vb;
|
||||
|
||||
if (a->ae_tag == b->ae_tag)
|
||||
return (a->ae_id - b->ae_id);
|
||||
return (a->ae_tag - b->ae_tag);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert from in-memory XFS to extended attribute representation.
|
||||
*/
|
||||
STATIC int
|
||||
posix_acl_xfs_to_xattr(
|
||||
xfs_acl_t *src,
|
||||
posix_acl_xattr_header *dest,
|
||||
size_t size)
|
||||
{
|
||||
int n;
|
||||
size_t new_size = posix_acl_xattr_size(src->acl_cnt);
|
||||
posix_acl_xattr_entry *dest_entry;
|
||||
xfs_acl_entry_t *src_entry;
|
||||
|
||||
if (size < new_size)
|
||||
return -ERANGE;
|
||||
|
||||
/* Need to sort src XFS ACL by <ae_tag,ae_id> */
|
||||
xfs_sort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]),
|
||||
xfs_acl_entry_compare);
|
||||
|
||||
dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
|
||||
dest_entry = &dest->a_entries[0];
|
||||
src_entry = &src->acl_entry[0];
|
||||
for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) {
|
||||
dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm);
|
||||
if (_ACL_PERM_INVALID(src_entry->ae_perm))
|
||||
return -EINVAL;
|
||||
dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag);
|
||||
switch (src_entry->ae_tag) {
|
||||
case ACL_USER:
|
||||
case ACL_GROUP:
|
||||
dest_entry->e_id = cpu_to_le32(src_entry->ae_id);
|
||||
break;
|
||||
case ACL_USER_OBJ:
|
||||
case ACL_GROUP_OBJ:
|
||||
case ACL_MASK:
|
||||
case ACL_OTHER:
|
||||
dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return new_size;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_vget(
|
||||
struct inode *vp,
|
||||
void *acl,
|
||||
size_t size,
|
||||
int kind)
|
||||
{
|
||||
int error;
|
||||
xfs_acl_t *xfs_acl = NULL;
|
||||
posix_acl_xattr_header *ext_acl = acl;
|
||||
int flags = 0;
|
||||
|
||||
if(size) {
|
||||
if (!(_ACL_ALLOC(xfs_acl))) {
|
||||
error = ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
memset(xfs_acl, 0, sizeof(xfs_acl_t));
|
||||
} else
|
||||
flags = ATTR_KERNOVAL;
|
||||
|
||||
xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (!size) {
|
||||
error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES);
|
||||
} else {
|
||||
if (xfs_acl_invalid(xfs_acl)) {
|
||||
error = EINVAL;
|
||||
goto out;
|
||||
}
|
||||
if (kind == _ACL_TYPE_ACCESS)
|
||||
xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl);
|
||||
error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
|
||||
}
|
||||
out:
|
||||
if(xfs_acl)
|
||||
_ACL_FREE(xfs_acl);
|
||||
return -error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_vremove(
|
||||
struct inode *vp,
|
||||
int kind)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xfs_acl_allow_set(vp, kind);
|
||||
if (!error) {
|
||||
error = xfs_attr_remove(XFS_I(vp),
|
||||
kind == _ACL_TYPE_DEFAULT?
|
||||
SGI_ACL_DEFAULT: SGI_ACL_FILE,
|
||||
ATTR_ROOT);
|
||||
if (error == ENOATTR)
|
||||
error = 0; /* 'scool */
|
||||
}
|
||||
return -error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_vset(
|
||||
struct inode *vp,
|
||||
void *acl,
|
||||
size_t size,
|
||||
int kind)
|
||||
{
|
||||
posix_acl_xattr_header *ext_acl = acl;
|
||||
xfs_acl_t *xfs_acl;
|
||||
int error;
|
||||
int basicperms = 0; /* more than std unix perms? */
|
||||
|
||||
if (!acl)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(_ACL_ALLOC(xfs_acl)))
|
||||
return -ENOMEM;
|
||||
|
||||
error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl);
|
||||
if (error) {
|
||||
_ACL_FREE(xfs_acl);
|
||||
return -error;
|
||||
}
|
||||
if (!xfs_acl->acl_cnt) {
|
||||
_ACL_FREE(xfs_acl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = xfs_acl_allow_set(vp, kind);
|
||||
|
||||
/* Incoming ACL exists, set file mode based on its value */
|
||||
if (!error && kind == _ACL_TYPE_ACCESS)
|
||||
error = xfs_acl_setmode(vp, xfs_acl, &basicperms);
|
||||
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* If we have more than std unix permissions, set up the actual attr.
|
||||
* Otherwise, delete any existing attr. This prevents us from
|
||||
* having actual attrs for permissions that can be stored in the
|
||||
* standard permission bits.
|
||||
*/
|
||||
if (!basicperms) {
|
||||
xfs_acl_set_attr(vp, xfs_acl, kind, &error);
|
||||
} else {
|
||||
error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
|
||||
}
|
||||
|
||||
out:
|
||||
_ACL_FREE(xfs_acl);
|
||||
return -error;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_iaccess(
|
||||
xfs_inode_t *ip,
|
||||
mode_t mode,
|
||||
cred_t *cr)
|
||||
{
|
||||
xfs_acl_t *acl;
|
||||
int rval;
|
||||
struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE};
|
||||
|
||||
if (!(_ACL_ALLOC(acl)))
|
||||
return -1;
|
||||
|
||||
/* If the file has no ACL return -1. */
|
||||
rval = sizeof(xfs_acl_t);
|
||||
if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, ATTR_ROOT)) {
|
||||
_ACL_FREE(acl);
|
||||
return -1;
|
||||
}
|
||||
xfs_acl_get_endian(acl);
|
||||
|
||||
/* If the file has an empty ACL return -1. */
|
||||
if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) {
|
||||
_ACL_FREE(acl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Synchronize ACL with mode bits */
|
||||
xfs_acl_sync_mode(ip->i_d.di_mode, acl);
|
||||
|
||||
rval = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr);
|
||||
_ACL_FREE(acl);
|
||||
return rval;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_acl_allow_set(
|
||||
struct inode *vp,
|
||||
int kind)
|
||||
{
|
||||
if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
|
||||
return EPERM;
|
||||
if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode))
|
||||
return ENOTDIR;
|
||||
if (vp->i_sb->s_flags & MS_RDONLY)
|
||||
return EROFS;
|
||||
if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER))
|
||||
return EPERM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: cr is only used here for the capability check if the ACL test fails.
|
||||
* It is not used to find out the credentials uid or groups etc, as was
|
||||
* done in IRIX. It is assumed that the uid and groups for the current
|
||||
* thread are taken from "current" instead of the cr parameter.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_acl_access(
|
||||
uid_t fuid,
|
||||
gid_t fgid,
|
||||
xfs_acl_t *fap,
|
||||
mode_t md,
|
||||
cred_t *cr)
|
||||
{
|
||||
xfs_acl_entry_t matched;
|
||||
int i, allows;
|
||||
int maskallows = -1; /* true, but not 1, either */
|
||||
int seen_userobj = 0;
|
||||
|
||||
matched.ae_tag = 0; /* Invalid type */
|
||||
matched.ae_perm = 0;
|
||||
|
||||
for (i = 0; i < fap->acl_cnt; i++) {
|
||||
/*
|
||||
* Break out if we've got a user_obj entry or
|
||||
* a user entry and the mask (and have processed USER_OBJ)
|
||||
*/
|
||||
if (matched.ae_tag == ACL_USER_OBJ)
|
||||
break;
|
||||
if (matched.ae_tag == ACL_USER) {
|
||||
if (maskallows != -1 && seen_userobj)
|
||||
break;
|
||||
if (fap->acl_entry[i].ae_tag != ACL_MASK &&
|
||||
fap->acl_entry[i].ae_tag != ACL_USER_OBJ)
|
||||
continue;
|
||||
}
|
||||
/* True if this entry allows the requested access */
|
||||
allows = ((fap->acl_entry[i].ae_perm & md) == md);
|
||||
|
||||
switch (fap->acl_entry[i].ae_tag) {
|
||||
case ACL_USER_OBJ:
|
||||
seen_userobj = 1;
|
||||
if (fuid != current_fsuid())
|
||||
continue;
|
||||
matched.ae_tag = ACL_USER_OBJ;
|
||||
matched.ae_perm = allows;
|
||||
break;
|
||||
case ACL_USER:
|
||||
if (fap->acl_entry[i].ae_id != current_fsuid())
|
||||
continue;
|
||||
matched.ae_tag = ACL_USER;
|
||||
matched.ae_perm = allows;
|
||||
break;
|
||||
case ACL_GROUP_OBJ:
|
||||
if ((matched.ae_tag == ACL_GROUP_OBJ ||
|
||||
matched.ae_tag == ACL_GROUP) && !allows)
|
||||
continue;
|
||||
if (!in_group_p(fgid))
|
||||
continue;
|
||||
matched.ae_tag = ACL_GROUP_OBJ;
|
||||
matched.ae_perm = allows;
|
||||
break;
|
||||
case ACL_GROUP:
|
||||
if ((matched.ae_tag == ACL_GROUP_OBJ ||
|
||||
matched.ae_tag == ACL_GROUP) && !allows)
|
||||
continue;
|
||||
if (!in_group_p(fap->acl_entry[i].ae_id))
|
||||
continue;
|
||||
matched.ae_tag = ACL_GROUP;
|
||||
matched.ae_perm = allows;
|
||||
break;
|
||||
case ACL_MASK:
|
||||
maskallows = allows;
|
||||
break;
|
||||
case ACL_OTHER:
|
||||
if (matched.ae_tag != 0)
|
||||
continue;
|
||||
matched.ae_tag = ACL_OTHER;
|
||||
matched.ae_perm = allows;
|
||||
break;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* First possibility is that no matched entry allows access.
|
||||
* The capability to override DAC may exist, so check for it.
|
||||
*/
|
||||
switch (matched.ae_tag) {
|
||||
case ACL_OTHER:
|
||||
case ACL_USER_OBJ:
|
||||
if (matched.ae_perm)
|
||||
return 0;
|
||||
break;
|
||||
case ACL_USER:
|
||||
case ACL_GROUP_OBJ:
|
||||
case ACL_GROUP:
|
||||
if (maskallows && matched.ae_perm)
|
||||
return 0;
|
||||
break;
|
||||
case 0:
|
||||
break;
|
||||
}
|
||||
|
||||
/* EACCES tells generic_permission to check for capability overrides */
|
||||
return EACCES;
|
||||
}
|
||||
|
||||
/*
|
||||
* ACL validity checker.
|
||||
* This acl validation routine checks each ACL entry read in makes sense.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_acl_invalid(
|
||||
xfs_acl_t *aclp)
|
||||
{
|
||||
xfs_acl_entry_t *entry, *e;
|
||||
int user = 0, group = 0, other = 0, mask = 0;
|
||||
int mask_required = 0;
|
||||
int i, j;
|
||||
|
||||
if (!aclp)
|
||||
goto acl_invalid;
|
||||
|
||||
if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES)
|
||||
goto acl_invalid;
|
||||
|
||||
for (i = 0; i < aclp->acl_cnt; i++) {
|
||||
entry = &aclp->acl_entry[i];
|
||||
switch (entry->ae_tag) {
|
||||
case ACL_USER_OBJ:
|
||||
if (user++)
|
||||
goto acl_invalid;
|
||||
break;
|
||||
case ACL_GROUP_OBJ:
|
||||
if (group++)
|
||||
goto acl_invalid;
|
||||
break;
|
||||
case ACL_OTHER:
|
||||
if (other++)
|
||||
goto acl_invalid;
|
||||
break;
|
||||
case ACL_USER:
|
||||
case ACL_GROUP:
|
||||
for (j = i + 1; j < aclp->acl_cnt; j++) {
|
||||
e = &aclp->acl_entry[j];
|
||||
if (e->ae_id == entry->ae_id &&
|
||||
e->ae_tag == entry->ae_tag)
|
||||
goto acl_invalid;
|
||||
}
|
||||
mask_required++;
|
||||
break;
|
||||
case ACL_MASK:
|
||||
if (mask++)
|
||||
goto acl_invalid;
|
||||
break;
|
||||
default:
|
||||
goto acl_invalid;
|
||||
}
|
||||
}
|
||||
if (!user || !group || !other || (mask_required && !mask))
|
||||
goto acl_invalid;
|
||||
else
|
||||
return 0;
|
||||
acl_invalid:
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do ACL endian conversion.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_acl_get_endian(
|
||||
xfs_acl_t *aclp)
|
||||
{
|
||||
xfs_acl_entry_t *ace, *end;
|
||||
|
||||
INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
|
||||
end = &aclp->acl_entry[0]+aclp->acl_cnt;
|
||||
for (ace = &aclp->acl_entry[0]; ace < end; ace++) {
|
||||
INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag);
|
||||
INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id);
|
||||
INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the ACL from the EA and do endian conversion.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_acl_get_attr(
|
||||
struct inode *vp,
|
||||
xfs_acl_t *aclp,
|
||||
int kind,
|
||||
int flags,
|
||||
int *error)
|
||||
{
|
||||
int len = sizeof(xfs_acl_t);
|
||||
|
||||
ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
|
||||
flags |= ATTR_ROOT;
|
||||
*error = xfs_attr_get(XFS_I(vp),
|
||||
kind == _ACL_TYPE_ACCESS ?
|
||||
SGI_ACL_FILE : SGI_ACL_DEFAULT,
|
||||
(char *)aclp, &len, flags);
|
||||
if (*error || (flags & ATTR_KERNOVAL))
|
||||
return;
|
||||
xfs_acl_get_endian(aclp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the EA with the ACL and do endian conversion.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_acl_set_attr(
|
||||
struct inode *vp,
|
||||
xfs_acl_t *aclp,
|
||||
int kind,
|
||||
int *error)
|
||||
{
|
||||
xfs_acl_entry_t *ace, *newace, *end;
|
||||
xfs_acl_t *newacl;
|
||||
int len;
|
||||
|
||||
if (!(_ACL_ALLOC(newacl))) {
|
||||
*error = ENOMEM;
|
||||
return;
|
||||
}
|
||||
|
||||
len = sizeof(xfs_acl_t) -
|
||||
(sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt));
|
||||
end = &aclp->acl_entry[0]+aclp->acl_cnt;
|
||||
for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0];
|
||||
ace < end;
|
||||
ace++, newace++) {
|
||||
INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag);
|
||||
INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id);
|
||||
INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
|
||||
}
|
||||
INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
|
||||
*error = xfs_attr_set(XFS_I(vp),
|
||||
kind == _ACL_TYPE_ACCESS ?
|
||||
SGI_ACL_FILE: SGI_ACL_DEFAULT,
|
||||
(char *)newacl, len, ATTR_ROOT);
|
||||
_ACL_FREE(newacl);
|
||||
}
|
||||
|
||||
int
|
||||
xfs_acl_vtoacl(
|
||||
struct inode *vp,
|
||||
xfs_acl_t *access_acl,
|
||||
xfs_acl_t *default_acl)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
if (access_acl) {
|
||||
/*
|
||||
* Get the Access ACL and the mode. If either cannot
|
||||
* be obtained for some reason, invalidate the access ACL.
|
||||
*/
|
||||
xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
|
||||
if (error)
|
||||
access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
|
||||
else /* We have a good ACL and the file mode, synchronize. */
|
||||
xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl);
|
||||
}
|
||||
|
||||
if (default_acl) {
|
||||
xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error);
|
||||
if (error)
|
||||
default_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function retrieves the parent directory's acl, processes it
|
||||
* and lets the child inherit the acl(s) that it should.
|
||||
*/
|
||||
int
|
||||
xfs_acl_inherit(
|
||||
struct inode *vp,
|
||||
mode_t mode,
|
||||
xfs_acl_t *pdaclp)
|
||||
{
|
||||
xfs_acl_t *cacl;
|
||||
int error = 0;
|
||||
int basicperms = 0;
|
||||
|
||||
/*
|
||||
* If the parent does not have a default ACL, or it's an
|
||||
* invalid ACL, we're done.
|
||||
*/
|
||||
if (!vp)
|
||||
return 0;
|
||||
if (!pdaclp || xfs_acl_invalid(pdaclp))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Copy the default ACL of the containing directory to
|
||||
* the access ACL of the new file and use the mode that
|
||||
* was passed in to set up the correct initial values for
|
||||
* the u::,g::[m::], and o:: entries. This is what makes
|
||||
* umask() "work" with ACL's.
|
||||
*/
|
||||
|
||||
if (!(_ACL_ALLOC(cacl)))
|
||||
return ENOMEM;
|
||||
|
||||
memcpy(cacl, pdaclp, sizeof(xfs_acl_t));
|
||||
xfs_acl_filter_mode(mode, cacl);
|
||||
error = xfs_acl_setmode(vp, cacl, &basicperms);
|
||||
if (error)
|
||||
goto out_error;
|
||||
|
||||
/*
|
||||
* Set the Default and Access ACL on the file. The mode is already
|
||||
* set on the file, so we don't need to worry about that.
|
||||
*
|
||||
* If the new file is a directory, its default ACL is a copy of
|
||||
* the containing directory's default ACL.
|
||||
*/
|
||||
if (S_ISDIR(vp->i_mode))
|
||||
xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
|
||||
if (!error && !basicperms)
|
||||
xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
|
||||
out_error:
|
||||
_ACL_FREE(cacl);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the correct mode on the file based on the supplied ACL. This
|
||||
* makes sure that the mode on the file reflects the state of the
|
||||
* u::,g::[m::], and o:: entries in the ACL. Since the mode is where
|
||||
* the ACL is going to get the permissions for these entries, we must
|
||||
* synchronize the mode whenever we set the ACL on a file.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_acl_setmode(
|
||||
struct inode *vp,
|
||||
xfs_acl_t *acl,
|
||||
int *basicperms)
|
||||
{
|
||||
struct iattr iattr;
|
||||
xfs_acl_entry_t *ap;
|
||||
xfs_acl_entry_t *gap = NULL;
|
||||
int i, nomask = 1;
|
||||
|
||||
*basicperms = 1;
|
||||
|
||||
if (acl->acl_cnt == XFS_ACL_NOT_PRESENT)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Copy the u::, g::, o::, and m:: bits from the ACL into the
|
||||
* mode. The m:: bits take precedence over the g:: bits.
|
||||
*/
|
||||
iattr.ia_valid = ATTR_MODE;
|
||||
iattr.ia_mode = XFS_I(vp)->i_d.di_mode;
|
||||
iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
|
||||
ap = acl->acl_entry;
|
||||
for (i = 0; i < acl->acl_cnt; ++i) {
|
||||
switch (ap->ae_tag) {
|
||||
case ACL_USER_OBJ:
|
||||
iattr.ia_mode |= ap->ae_perm << 6;
|
||||
break;
|
||||
case ACL_GROUP_OBJ:
|
||||
gap = ap;
|
||||
break;
|
||||
case ACL_MASK: /* more than just standard modes */
|
||||
nomask = 0;
|
||||
iattr.ia_mode |= ap->ae_perm << 3;
|
||||
*basicperms = 0;
|
||||
break;
|
||||
case ACL_OTHER:
|
||||
iattr.ia_mode |= ap->ae_perm;
|
||||
break;
|
||||
default: /* more than just standard modes */
|
||||
*basicperms = 0;
|
||||
break;
|
||||
}
|
||||
ap++;
|
||||
}
|
||||
|
||||
/* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */
|
||||
if (gap && nomask)
|
||||
iattr.ia_mode |= gap->ae_perm << 3;
|
||||
|
||||
return xfs_setattr(XFS_I(vp), &iattr, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* The permissions for the special ACL entries (u::, g::[m::], o::) are
|
||||
* actually stored in the file mode (if there is both a group and a mask,
|
||||
* the group is stored in the ACL entry and the mask is stored on the file).
|
||||
* This allows the mode to remain automatically in sync with the ACL without
|
||||
* the need for a call-back to the ACL system at every point where the mode
|
||||
* could change. This function takes the permissions from the specified mode
|
||||
* and places it in the supplied ACL.
|
||||
*
|
||||
* This implementation draws its validity from the fact that, when the ACL
|
||||
* was assigned, the mode was copied from the ACL.
|
||||
* If the mode did not change, therefore, the mode remains exactly what was
|
||||
* taken from the special ACL entries at assignment.
|
||||
* If a subsequent chmod() was done, the POSIX spec says that the change in
|
||||
* mode must cause an update to the ACL seen at user level and used for
|
||||
* access checks. Before and after a mode change, therefore, the file mode
|
||||
* most accurately reflects what the special ACL entries should permit/deny.
|
||||
*
|
||||
* CAVEAT: If someone sets the SGI_ACL_FILE attribute directly,
|
||||
* the existing mode bits will override whatever is in the
|
||||
* ACL. Similarly, if there is a pre-existing ACL that was
|
||||
* never in sync with its mode (owing to a bug in 6.5 and
|
||||
* before), it will now magically (or mystically) be
|
||||
* synchronized. This could cause slight astonishment, but
|
||||
* it is better than inconsistent permissions.
|
||||
*
|
||||
* The supplied ACL is a template that may contain any combination
|
||||
* of special entries. These are treated as place holders when we fill
|
||||
* out the ACL. This routine does not add or remove special entries, it
|
||||
* simply unites each special entry with its associated set of permissions.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_acl_sync_mode(
|
||||
mode_t mode,
|
||||
xfs_acl_t *acl)
|
||||
{
|
||||
int i, nomask = 1;
|
||||
xfs_acl_entry_t *ap;
|
||||
xfs_acl_entry_t *gap = NULL;
|
||||
|
||||
/*
|
||||
* Set ACL entries. POSIX1003.1eD16 requires that the MASK
|
||||
* be set instead of the GROUP entry, if there is a MASK.
|
||||
*/
|
||||
for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
|
||||
switch (ap->ae_tag) {
|
||||
case ACL_USER_OBJ:
|
||||
ap->ae_perm = (mode >> 6) & 0x7;
|
||||
break;
|
||||
case ACL_GROUP_OBJ:
|
||||
gap = ap;
|
||||
break;
|
||||
case ACL_MASK:
|
||||
nomask = 0;
|
||||
ap->ae_perm = (mode >> 3) & 0x7;
|
||||
break;
|
||||
case ACL_OTHER:
|
||||
ap->ae_perm = mode & 0x7;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
|
||||
if (gap && nomask)
|
||||
gap->ae_perm = (mode >> 3) & 0x7;
|
||||
}
|
||||
|
||||
/*
|
||||
* When inheriting an Access ACL from a directory Default ACL,
|
||||
* the ACL bits are set to the intersection of the ACL default
|
||||
* permission bits and the file permission bits in mode. If there
|
||||
* are no permission bits on the file then we must not give them
|
||||
* the ACL. This is what what makes umask() work with ACLs.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_acl_filter_mode(
|
||||
mode_t mode,
|
||||
xfs_acl_t *acl)
|
||||
{
|
||||
int i, nomask = 1;
|
||||
xfs_acl_entry_t *ap;
|
||||
xfs_acl_entry_t *gap = NULL;
|
||||
|
||||
/*
|
||||
* Set ACL entries. POSIX1003.1eD16 requires that the MASK
|
||||
* be merged with GROUP entry, if there is a MASK.
|
||||
*/
|
||||
for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
|
||||
switch (ap->ae_tag) {
|
||||
case ACL_USER_OBJ:
|
||||
ap->ae_perm &= (mode >> 6) & 0x7;
|
||||
break;
|
||||
case ACL_GROUP_OBJ:
|
||||
gap = ap;
|
||||
break;
|
||||
case ACL_MASK:
|
||||
nomask = 0;
|
||||
ap->ae_perm &= (mode >> 3) & 0x7;
|
||||
break;
|
||||
case ACL_OTHER:
|
||||
ap->ae_perm &= mode & 0x7;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
|
||||
if (gap && nomask)
|
||||
gap->ae_perm &= (mode >> 3) & 0x7;
|
||||
}
|
@ -18,81 +18,48 @@
|
||||
#ifndef __XFS_ACL_H__
|
||||
#define __XFS_ACL_H__
|
||||
|
||||
/*
|
||||
* Access Control Lists
|
||||
*/
|
||||
typedef __uint16_t xfs_acl_perm_t;
|
||||
typedef __int32_t xfs_acl_tag_t;
|
||||
typedef __int32_t xfs_acl_id_t;
|
||||
struct inode;
|
||||
struct posix_acl;
|
||||
struct xfs_inode;
|
||||
|
||||
#define XFS_ACL_MAX_ENTRIES 25
|
||||
#define XFS_ACL_NOT_PRESENT (-1)
|
||||
|
||||
typedef struct xfs_acl_entry {
|
||||
xfs_acl_tag_t ae_tag;
|
||||
xfs_acl_id_t ae_id;
|
||||
xfs_acl_perm_t ae_perm;
|
||||
} xfs_acl_entry_t;
|
||||
|
||||
typedef struct xfs_acl {
|
||||
__int32_t acl_cnt;
|
||||
xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES];
|
||||
} xfs_acl_t;
|
||||
/* On-disk XFS access control list structure */
|
||||
struct xfs_acl {
|
||||
__be32 acl_cnt;
|
||||
struct xfs_acl_entry {
|
||||
__be32 ae_tag;
|
||||
__be32 ae_id;
|
||||
__be16 ae_perm;
|
||||
} acl_entry[XFS_ACL_MAX_ENTRIES];
|
||||
};
|
||||
|
||||
/* On-disk XFS extended attribute names */
|
||||
#define SGI_ACL_FILE "SGI_ACL_FILE"
|
||||
#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
|
||||
#define SGI_ACL_FILE "SGI_ACL_FILE"
|
||||
#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
|
||||
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
|
||||
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
|
||||
|
||||
#define _ACL_TYPE_ACCESS 1
|
||||
#define _ACL_TYPE_DEFAULT 2
|
||||
|
||||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
extern int xfs_check_acl(struct inode *inode, int mask);
|
||||
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
|
||||
extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
|
||||
extern int xfs_acl_chmod(struct inode *inode);
|
||||
extern void xfs_inode_init_acls(struct xfs_inode *ip);
|
||||
extern void xfs_inode_clear_acls(struct xfs_inode *ip);
|
||||
extern int posix_acl_access_exists(struct inode *inode);
|
||||
extern int posix_acl_default_exists(struct inode *inode);
|
||||
|
||||
struct vattr;
|
||||
struct xfs_inode;
|
||||
|
||||
extern struct kmem_zone *xfs_acl_zone;
|
||||
#define xfs_acl_zone_init(zone, name) \
|
||||
(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
|
||||
#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
|
||||
|
||||
extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *);
|
||||
extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
|
||||
extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *);
|
||||
extern int xfs_acl_vhasacl_access(struct inode *);
|
||||
extern int xfs_acl_vhasacl_default(struct inode *);
|
||||
extern int xfs_acl_vset(struct inode *, void *, size_t, int);
|
||||
extern int xfs_acl_vget(struct inode *, void *, size_t, int);
|
||||
extern int xfs_acl_vremove(struct inode *, int);
|
||||
|
||||
#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
|
||||
|
||||
#define _ACL_INHERIT(c,m,d) (xfs_acl_inherit(c,m,d))
|
||||
#define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0)
|
||||
#define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0)
|
||||
#define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access
|
||||
#define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default
|
||||
|
||||
#define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP))
|
||||
#define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0)
|
||||
|
||||
extern struct xattr_handler xfs_xattr_system_handler;
|
||||
#else
|
||||
#define xfs_acl_zone_init(zone,name)
|
||||
#define xfs_acl_zone_destroy(zone)
|
||||
#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP)
|
||||
#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP)
|
||||
#define xfs_acl_vremove(v,t) (-EOPNOTSUPP)
|
||||
#define xfs_acl_vhasacl_access(v) (0)
|
||||
#define xfs_acl_vhasacl_default(v) (0)
|
||||
#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */
|
||||
#define _ACL_FREE(a) ((void)0)
|
||||
#define _ACL_INHERIT(c,m,d) (0)
|
||||
#define _ACL_GET_ACCESS(pv,pa) (0)
|
||||
#define _ACL_GET_DEFAULT(pv,pd) (0)
|
||||
#define _ACL_ACCESS_EXISTS (NULL)
|
||||
#define _ACL_DEFAULT_EXISTS (NULL)
|
||||
#endif
|
||||
|
||||
# define xfs_check_acl NULL
|
||||
# define xfs_get_acl(inode, type) NULL
|
||||
# define xfs_inherit_acl(inode, default_acl) 0
|
||||
# define xfs_acl_chmod(inode) 0
|
||||
# define xfs_inode_init_acls(ip)
|
||||
# define xfs_inode_clear_acls(ip)
|
||||
# define posix_acl_access_exists(inode) 0
|
||||
# define posix_acl_default_exists(inode) 0
|
||||
#endif /* CONFIG_XFS_POSIX_ACL */
|
||||
#endif /* __XFS_ACL_H__ */
|
||||
|
@ -212,6 +212,8 @@ typedef struct xfs_perag
/*
* tags for inode radix tree
*/
#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
in xfs_inode_ag_iterator */
#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */

#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
@ -73,28 +73,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b)
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/* do we need conversion? */
|
||||
#define ARCH_NOCONVERT 1
|
||||
#ifdef XFS_NATIVE_HOST
|
||||
# define ARCH_CONVERT ARCH_NOCONVERT
|
||||
#else
|
||||
# define ARCH_CONVERT 0
|
||||
#endif
|
||||
|
||||
/* generic swapping macros */
|
||||
|
||||
#ifndef HAVE_SWABMACROS
|
||||
#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
|
||||
#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
|
||||
#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
|
||||
#endif
|
||||
|
||||
#define INT_SWAP(type, var) \
|
||||
((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
|
||||
((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
|
||||
((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
|
||||
(var))))
|
||||
|
||||
/*
|
||||
* get and set integers from potentially unaligned locations
|
||||
*/
|
||||
@ -107,16 +85,6 @@ static inline void be64_add_cpu(__be64 *a, __s64 b)
|
||||
((__u8*)(pointer))[1] = (((value) ) & 0xff); \
|
||||
}
|
||||
|
||||
/* does not return a value */
|
||||
#define INT_SET(reference,arch,valueref) \
|
||||
(__builtin_constant_p(valueref) ? \
|
||||
(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
|
||||
(void)( \
|
||||
((reference) = (valueref)), \
|
||||
( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
|
||||
) \
|
||||
)
|
||||
|
||||
/*
|
||||
* In directories inode numbers are stored as unaligned arrays of unsigned
|
||||
* 8bit integers on disk.
|
||||
|
@ -45,7 +45,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_quota.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_vnodeops.h"
|
||||
|
||||
@ -249,8 +248,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
|
||||
/*
|
||||
* Attach the dquots to the inode.
|
||||
*/
|
||||
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
|
||||
return (error);
|
||||
error = xfs_qm_dqattach(dp, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If the inode doesn't have an attribute fork, add one.
|
||||
@ -311,7 +311,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
|
||||
}
|
||||
xfs_ilock(dp, XFS_ILOCK_EXCL);
|
||||
|
||||
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0,
|
||||
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
|
||||
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
if (error) {
|
||||
@ -501,8 +501,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
|
||||
/*
|
||||
* Attach the dquots to the inode.
|
||||
*/
|
||||
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
|
||||
return (error);
|
||||
error = xfs_qm_dqattach(dp, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* Start our first transaction of the day.
|
||||
|
@ -2691,7 +2691,7 @@ xfs_bmap_rtalloc(
|
||||
* Adjust the disk quota also. This was reserved
|
||||
* earlier.
|
||||
*/
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
|
||||
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
|
||||
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
|
||||
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
|
||||
} else {
|
||||
@ -2995,7 +2995,7 @@ xfs_bmap_btalloc(
|
||||
* Adjust the disk quota also. This was reserved
|
||||
* earlier.
|
||||
*/
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
|
||||
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
|
||||
ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
|
||||
XFS_TRANS_DQ_BCOUNT,
|
||||
(long) args.len);
|
||||
@ -3066,7 +3066,7 @@ xfs_bmap_btree_to_extents(
|
||||
return error;
|
||||
xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
|
||||
ip->i_d.di_nblocks--;
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
xfs_trans_binval(tp, cbp);
|
||||
if (cur->bc_bufs[0] == cbp)
|
||||
cur->bc_bufs[0] = NULL;
|
||||
@ -3386,7 +3386,7 @@ xfs_bmap_del_extent(
|
||||
* Adjust quota data.
|
||||
*/
|
||||
if (qfield)
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
|
||||
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
|
||||
|
||||
/*
|
||||
* Account for change in delayed indirect blocks.
|
||||
@ -3523,7 +3523,7 @@ xfs_bmap_extents_to_btree(
|
||||
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
|
||||
cur->bc_private.b.allocated++;
|
||||
ip->i_d.di_nblocks++;
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
|
||||
/*
|
||||
* Fill in the child block.
|
||||
@ -3690,7 +3690,7 @@ xfs_bmap_local_to_extents(
|
||||
XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
|
||||
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
|
||||
ip->i_d.di_nblocks = 1;
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
|
||||
xfs_trans_mod_dquot_byino(tp, ip,
|
||||
XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
flags |= xfs_ilog_fext(whichfork);
|
||||
} else {
|
||||
@ -4048,7 +4048,7 @@ xfs_bmap_add_attrfork(
|
||||
XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
|
||||
goto error0;
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
|
||||
error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
|
||||
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
if (error) {
|
||||
@ -4983,10 +4983,11 @@ xfs_bmapi(
|
||||
* adjusted later. We return if we haven't
|
||||
* allocated blocks already inside this loop.
|
||||
*/
|
||||
if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS(
|
||||
mp, NULL, ip, (long)alen, 0,
|
||||
error = xfs_trans_reserve_quota_nblks(
|
||||
NULL, ip, (long)alen, 0,
|
||||
rt ? XFS_QMOPT_RES_RTBLKS :
|
||||
XFS_QMOPT_RES_REGBLKS))) {
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
if (error) {
|
||||
if (n == 0) {
|
||||
*nmap = 0;
|
||||
ASSERT(cur == NULL);
|
||||
@ -5035,8 +5036,8 @@ xfs_bmapi(
|
||||
if (XFS_IS_QUOTA_ON(mp))
|
||||
/* unreserve the blocks now */
|
||||
(void)
|
||||
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
|
||||
mp, NULL, ip,
|
||||
xfs_trans_unreserve_quota_nblks(
|
||||
NULL, ip,
|
||||
(long)alen, 0, rt ?
|
||||
XFS_QMOPT_RES_RTBLKS :
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
@ -5691,14 +5692,14 @@ xfs_bunmapi(
|
||||
do_div(rtexts, mp->m_sb.sb_rextsize);
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
|
||||
(int64_t)rtexts, rsvd);
|
||||
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
|
||||
NULL, ip, -((long)del.br_blockcount), 0,
|
||||
(void)xfs_trans_reserve_quota_nblks(NULL,
|
||||
ip, -((long)del.br_blockcount), 0,
|
||||
XFS_QMOPT_RES_RTBLKS);
|
||||
} else {
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
|
||||
(int64_t)del.br_blockcount, rsvd);
|
||||
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
|
||||
NULL, ip, -((long)del.br_blockcount), 0,
|
||||
(void)xfs_trans_reserve_quota_nblks(NULL,
|
||||
ip, -((long)del.br_blockcount), 0,
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
}
|
||||
ip->i_delayed_blks -= del.br_blockcount;
|
||||
@ -6085,6 +6086,7 @@ xfs_getbmap(
|
||||
break;
|
||||
}
|
||||
|
||||
kmem_free(out);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -590,7 +590,7 @@ xfs_bmbt_alloc_block(
|
||||
cur->bc_private.b.allocated++;
|
||||
cur->bc_private.b.ip->i_d.di_nblocks++;
|
||||
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
|
||||
xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
|
||||
XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
|
||||
new->l = cpu_to_be64(args.fsbno);
|
||||
@ -618,7 +618,7 @@ xfs_bmbt_free_block(
|
||||
ip->i_d.di_nblocks--;
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
xfs_trans_binval(tp, bp);
|
||||
return 0;
|
||||
}
|
||||
|
@ -542,10 +542,8 @@ xfs_filestream_associate(
* waiting for the lock because someone else is waiting on the lock we
* hold and we cannot drop that as we are in a transaction here.
*
* Lucky for us, this inversion is rarely a problem because it's a
* directory inode that we are trying to lock here and that means the
* only place that matters is xfs_sync_inodes() and SYNC_DELWRI is
* used. i.e. freeze, remount-ro, quotasync or unmount.
* Lucky for us, this inversion is not a problem because it's a
* directory inode that we are trying to lock here.
*
* So, if we can't get the iolock without sleeping then just give up
*/

@ -239,10 +239,13 @@ typedef struct xfs_fsop_resblks {
* Minimum and maximum sizes need for growth checks
*/
#define XFS_MIN_AG_BLOCKS 64
#define XFS_MIN_LOG_BLOCKS 512
#define XFS_MAX_LOG_BLOCKS (64 * 1024)
#define XFS_MIN_LOG_BYTES (256 * 1024)
#define XFS_MAX_LOG_BYTES (128 * 1024 * 1024)
#define XFS_MIN_LOG_BLOCKS 512ULL
#define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL)
#define XFS_MIN_LOG_BYTES (10 * 1024 * 1024ULL)

/* keep the maximum size under 2^31 by a small amount */
#define XFS_MAX_LOG_BYTES \
((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)

/*
* Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
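The xfs_fs.h hunk above drops the fixed 128 MiB log cap and redefines XFS_MAX_LOG_BYTES as 2 GiB minus XFS_MIN_LOG_BYTES so that the value stays just under 2^31. A small standalone check of that arithmetic, with the constants copied from the hunk; the program itself is only illustrative.

/* Sanity-check the new log-size limits from the hunk above.
 * Constants are copied verbatim from the diff; main() is illustrative. */
#include <assert.h>
#include <stdio.h>

#define XFS_MIN_LOG_BLOCKS	512ULL
#define XFS_MAX_LOG_BLOCKS	(1024 * 1024ULL)
#define XFS_MIN_LOG_BYTES	(10 * 1024 * 1024ULL)

/* keep the maximum size under 2^31 by a small amount */
#define XFS_MAX_LOG_BYTES \
	((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)

int main(void)
{
	/* 2 GiB - 10 MiB = 2136997888, which is below 2^31 = 2147483648 */
	assert(XFS_MAX_LOG_BYTES < (1ULL << 31));
	printf("XFS_MAX_LOG_BYTES = %llu\n", XFS_MAX_LOG_BYTES);
	return 0;
}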
@ -18,6 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
@ -82,6 +83,7 @@ xfs_inode_alloc(
memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
ip->i_size = 0;
ip->i_new_size = 0;
xfs_inode_init_acls(ip);

/*
* Initialize inode's trace buffers.
@ -500,10 +502,7 @@ xfs_ireclaim(
* ilock one but will still hold the iolock.
*/
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
/*
* Release dquots (and their references) if any.
*/
XFS_QM_DQDETACH(ip->i_mount, ip);
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

switch (ip->i_d.di_mode & S_IFMT) {
@ -561,6 +560,7 @@ xfs_ireclaim(
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
xfs_inode_clear_acls(ip);
kmem_zone_free(xfs_inode_zone, ip);
}

@ -49,7 +49,6 @@
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"

@ -18,6 +18,7 @@
#ifndef __XFS_INODE_H__
#define __XFS_INODE_H__

struct posix_acl;
struct xfs_dinode;
struct xfs_inode;

@ -272,6 +273,11 @@ typedef struct xfs_inode {
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */

#ifdef CONFIG_XFS_POSIX_ACL
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
#endif

/* Trace buffers per inode. */
#ifdef XFS_INODE_TRACE
struct ktrace *i_trace; /* general inode trace */
@ -42,7 +42,6 @@
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
@ -385,7 +384,7 @@ xfs_iomap_write_direct(
|
||||
* Make sure that the dquots are there. This doesn't hold
|
||||
* the ilock across a disk read.
|
||||
*/
|
||||
error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
|
||||
error = xfs_qm_dqattach_locked(ip, 0);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
@ -444,8 +443,7 @@ xfs_iomap_write_direct(
|
||||
if (error)
|
||||
goto error_out;
|
||||
|
||||
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
|
||||
qblocks, 0, quota_flag);
|
||||
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
|
||||
if (error)
|
||||
goto error1;
|
||||
|
||||
@ -495,7 +493,7 @@ xfs_iomap_write_direct(
|
||||
|
||||
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
|
||||
xfs_bmap_cancel(&free_list);
|
||||
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
|
||||
xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
|
||||
|
||||
error1: /* Just cancel transaction */
|
||||
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
|
||||
@ -582,7 +580,7 @@ xfs_iomap_write_delay(
|
||||
* Make sure that the dquots are there. This doesn't hold
|
||||
* the ilock across a disk read.
|
||||
*/
|
||||
error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
|
||||
error = xfs_qm_dqattach_locked(ip, 0);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
@ -684,7 +682,8 @@ xfs_iomap_write_allocate(
|
||||
/*
|
||||
* Make sure that the dquots are there.
|
||||
*/
|
||||
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
|
||||
error = xfs_qm_dqattach(ip, 0);
|
||||
if (error)
|
||||
return XFS_ERROR(error);
|
||||
|
||||
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
||||
|
@ -1975,16 +1975,30 @@ xlog_recover_do_reg_buffer(
|
||||
error = 0;
|
||||
if (buf_f->blf_flags &
|
||||
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
|
||||
if (item->ri_buf[i].i_addr == NULL) {
|
||||
cmn_err(CE_ALERT,
|
||||
"XFS: NULL dquot in %s.", __func__);
|
||||
goto next;
|
||||
}
|
||||
if (item->ri_buf[i].i_len < sizeof(xfs_dqblk_t)) {
|
||||
cmn_err(CE_ALERT,
|
||||
"XFS: dquot too small (%d) in %s.",
|
||||
item->ri_buf[i].i_len, __func__);
|
||||
goto next;
|
||||
}
|
||||
error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
|
||||
item->ri_buf[i].i_addr,
|
||||
-1, 0, XFS_QMOPT_DOWARN,
|
||||
"dquot_buf_recover");
|
||||
if (error)
|
||||
goto next;
|
||||
}
|
||||
if (!error)
|
||||
memcpy(xfs_buf_offset(bp,
|
||||
(uint)bit << XFS_BLI_SHIFT), /* dest */
|
||||
item->ri_buf[i].i_addr, /* source */
|
||||
nbits<<XFS_BLI_SHIFT); /* length */
|
||||
|
||||
memcpy(xfs_buf_offset(bp,
|
||||
(uint)bit << XFS_BLI_SHIFT), /* dest */
|
||||
item->ri_buf[i].i_addr, /* source */
|
||||
nbits<<XFS_BLI_SHIFT); /* length */
|
||||
next:
|
||||
i++;
|
||||
bit += nbits;
|
||||
}
|
||||
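The recovery hunk above stops trusting the logged dquot region blindly: it now checks for a NULL payload and a too-short length before xfs_qm_dqcheck() and the memcpy() run. A reduced, stand-alone sketch of that validate-before-copy shape (the buffer and record types below are invented for the example, not the XFS ones):

    /* Illustrative sketch of validating a logged region before copying it. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct log_region {
    	const void *addr;	/* payload captured in the log item */
    	size_t len;		/* length of that payload */
    };

    #define MIN_RECORD_SIZE 104	/* stand-in for sizeof(xfs_dqblk_t) */

    /* Returns 0 if the region was applied, -1 if it was skipped. */
    static int apply_region(void *dest, const struct log_region *r)
    {
    	if (r->addr == NULL) {
    		fprintf(stderr, "recovery: NULL payload, skipping\n");
    		return -1;
    	}
    	if (r->len < MIN_RECORD_SIZE) {
    		fprintf(stderr, "recovery: payload too small (%zu), skipping\n",
    			r->len);
    		return -1;
    	}
    	/* Only a region that passed both checks reaches the copy. */
    	memcpy(dest, r->addr, r->len);
    	return 0;
    }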
@ -2615,7 +2629,19 @@ xlog_recover_do_dquot_trans(
return (0);

recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
ASSERT(recddq);

if (item->ri_buf[1].i_addr == NULL) {
cmn_err(CE_ALERT,
"XFS: NULL dquot in %s.", __func__);
return XFS_ERROR(EIO);
}
if (item->ri_buf[1].i_len < sizeof(xfs_dqblk_t)) {
cmn_err(CE_ALERT,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
return XFS_ERROR(EIO);
}

/*
* This type of quotas was turned off, so ignore this record.
*/

@ -959,6 +959,53 @@ xfs_check_sizes(xfs_mount_t *mp)
return 0;
}

/*
* Clear the quotaflags in memory and in the superblock.
*/
int
xfs_mount_reset_sbqflags(
struct xfs_mount *mp)
{
int error;
struct xfs_trans *tp;

mp->m_qflags = 0;

/*
* It is OK to look at sb_qflags here in mount path,
* without m_sb_lock.
*/
if (mp->m_sb.sb_qflags == 0)
return 0;
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = 0;
spin_unlock(&mp->m_sb_lock);

/*
* If the fs is readonly, let the incore superblock run
* with quotas off but don't flush the update out to disk
*/
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;

#ifdef QUOTADEBUG
xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
#endif

tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_mount_reset_sbqflags: Superblock update failed!");
return error;
}

xfs_mod_sb(tp, XFS_SB_QFLAGS);
return xfs_trans_commit(tp, 0);
}

/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct

@ -976,7 +1023,8 @@ xfs_mountfs(
xfs_sb_t *sbp = &(mp->m_sb);
xfs_inode_t *rip;
__uint64_t resblks;
uint quotamount, quotaflags;
uint quotamount = 0;
uint quotaflags = 0;
int error = 0;

xfs_mount_common(mp, sbp);

@ -1210,9 +1258,28 @@ xfs_mountfs(
/*
* Initialise the XFS quota management subsystem for this mount
*/
error = XFS_QM_INIT(mp, &quotamount, &quotaflags);
if (error)
goto out_rtunmount;
if (XFS_IS_QUOTA_RUNNING(mp)) {
error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
if (error)
goto out_rtunmount;
} else {
ASSERT(!XFS_IS_QUOTA_ON(mp));

/*
* If a file system had quotas running earlier, but decided to
* mount without -o uquota/pquota/gquota options, revoke the
* quotachecked license.
*/
if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
cmn_err(CE_NOTE,
"XFS: resetting qflags for filesystem %s",
mp->m_fsname);

error = xfs_mount_reset_sbqflags(mp);
if (error)
return error;
}
}

/*
* Finish recovering the file system. This part needed to be

@ -1228,9 +1295,19 @@ xfs_mountfs(
/*
* Complete the quota initialisation, post-log-replay component.
*/
error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
if (error)
goto out_rtunmount;
if (quotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;

xfs_qm_mount_quotas(mp);
}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
if (XFS_IS_QUOTA_ON(mp))
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
else
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
#endif

/*
* Now we are mounted, reserve a small amount of unused space for
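Taken together, the two xfs_mountfs() hunks above split quota setup into a pre-recovery step (probe the on-disk state, or clear stale superblock quota flags when no quota mount options were given) and a post-recovery step that actually enables quotas. A compact userspace model of that ordering; every type and helper below is a stand-in for the real routine named in the comment next to it, and only the ordering mirrors the actual code:

    #include <stdbool.h>

    struct mount_model {
    	bool quota_options_given;	/* -o uquota/pquota/gquota was used */
    	bool stale_quota_flags;		/* sb_qflags left over from an old mount */
    	unsigned int quotamount;	/* work discovered before log recovery */
    };

    /* Stand-in for xfs_qm_newmount(). */
    static int probe_quota_state(struct mount_model *m) { m->quotamount = 1; return 0; }
    /* Stand-in for xfs_mount_reset_sbqflags(). */
    static int reset_quota_flags(struct mount_model *m) { m->stale_quota_flags = false; return 0; }
    /* Stand-in for xfs_qm_mount_quotas(), which must run after log replay. */
    static void enable_quotas(struct mount_model *m) { (void)m; }

    int mount_quota_flow(struct mount_model *m)
    {
    	int error = 0;

    	if (m->quota_options_given)
    		error = probe_quota_state(m);	/* pre-recovery */
    	else if (m->stale_quota_flags)
    		error = reset_quota_flags(m);	/* revoke stale state */
    	if (error)
    		return error;

    	/* ... log recovery runs here ... */

    	if (m->quotamount)
    		enable_quotas(m);		/* post-recovery */
    	return 0;
    }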
@ -1279,12 +1356,7 @@ xfs_unmountfs(
__uint64_t resblks;
int error;

/*
* Release dquot that rootinode, rbmino and rsumino might be holding,
* and release the quota inodes.
*/
XFS_QM_UNMOUNT(mp);

xfs_qm_unmount_quotas(mp);
xfs_rtunmount_inodes(mp);
IRELE(mp->m_rootip);

@ -1299,12 +1371,9 @@ xfs_unmountfs(
* need to force the log first.
*/
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_ASYNC);
xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);

XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);

if (mp->m_quotainfo)
XFS_QM_DONE(mp);
xfs_qm_unmount(mp);

/*
* Flush out the log synchronously so that we know for sure

@ -64,6 +64,8 @@ struct xfs_swapext;
struct xfs_mru_cache;
struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;


/*
* Prototypes and functions for the Data Migration subsystem.

@ -107,86 +109,6 @@ typedef struct xfs_dmops {
(*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl)


/*
* Prototypes and functions for the Quota Management subsystem.
*/

struct xfs_dquot;
struct xfs_dqtrxops;
struct xfs_quotainfo;

typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
typedef void (*xfs_qmunmount_t)(struct xfs_mount *);
typedef void (*xfs_qmdone_t)(struct xfs_mount *);
typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint);
typedef void (*xfs_dqdetach_t)(struct xfs_inode *);
typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *,
struct xfs_inode *, uid_t, gid_t, prid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
typedef int (*xfs_dqvoprename_t)(struct xfs_inode **);
typedef struct xfs_dquot * (*xfs_dqvopchown_t)(
struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot **, struct xfs_dquot *);
typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *, uint);
typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *);
typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags);

typedef struct xfs_qmops {
xfs_qminit_t xfs_qminit;
xfs_qmdone_t xfs_qmdone;
xfs_qmmount_t xfs_qmmount;
xfs_qmunmount_t xfs_qmunmount;
xfs_dqrele_t xfs_dqrele;
xfs_dqattach_t xfs_dqattach;
xfs_dqdetach_t xfs_dqdetach;
xfs_dqpurgeall_t xfs_dqpurgeall;
xfs_dqvopalloc_t xfs_dqvopalloc;
xfs_dqvopcreate_t xfs_dqvopcreate;
xfs_dqvoprename_t xfs_dqvoprename;
xfs_dqvopchown_t xfs_dqvopchown;
xfs_dqvopchownresv_t xfs_dqvopchownresv;
xfs_dqstatvfs_t xfs_dqstatvfs;
xfs_dqsync_t xfs_dqsync;
struct xfs_dqtrxops *xfs_dqtrxops;
} xfs_qmops_t;

#define XFS_QM_INIT(mp, mnt, fl) \
(*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl)
#define XFS_QM_MOUNT(mp, mnt, fl) \
(*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl)
#define XFS_QM_UNMOUNT(mp) \
(*(mp)->m_qm_ops->xfs_qmunmount)(mp)
#define XFS_QM_DONE(mp) \
(*(mp)->m_qm_ops->xfs_qmdone)(mp)
#define XFS_QM_DQRELE(mp, dq) \
(*(mp)->m_qm_ops->xfs_dqrele)(dq)
#define XFS_QM_DQATTACH(mp, ip, fl) \
(*(mp)->m_qm_ops->xfs_dqattach)(ip, fl)
#define XFS_QM_DQDETACH(mp, ip) \
(*(mp)->m_qm_ops->xfs_dqdetach)(ip)
#define XFS_QM_DQPURGEALL(mp, fl) \
(*(mp)->m_qm_ops->xfs_dqpurgeall)(mp, fl)
#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, prid, fl, dq1, dq2) \
(*(mp)->m_qm_ops->xfs_dqvopalloc)(mp, ip, uid, gid, prid, fl, dq1, dq2)
#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
(*(mp)->m_qm_ops->xfs_dqvopcreate)(tp, ip, dq1, dq2)
#define XFS_QM_DQVOPRENAME(mp, ip) \
(*(mp)->m_qm_ops->xfs_dqvoprename)(ip)
#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \
(*(mp)->m_qm_ops->xfs_dqvopchown)(tp, ip, dqp, dq)
#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \
(*(mp)->m_qm_ops->xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl)
#define XFS_QM_DQSTATVFS(ip, statp) \
(*(ip)->i_mount->m_qm_ops->xfs_dqstatvfs)(ip, statp)
#define XFS_QM_DQSYNC(mp, flags) \
(*(mp)->m_qm_ops->xfs_dqsync)(mp, flags)

#ifdef HAVE_PERCPU_SB

/*

@ -510,8 +432,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);

extern int xfs_dmops_get(struct xfs_mount *);
extern void xfs_dmops_put(struct xfs_mount *);
extern int xfs_qmops_get(struct xfs_mount *);
extern void xfs_qmops_put(struct xfs_mount *);

extern struct xfs_dmops xfs_dmcore_xfs;


@ -1,152 +0,0 @@
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
#include "xfs_error.h"


STATIC struct xfs_dquot *
xfs_dqvopchown_default(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot **dqp,
struct xfs_dquot *dq)
{
return NULL;
}

/*
* Clear the quotaflags in memory and in the superblock.
*/
int
xfs_mount_reset_sbqflags(xfs_mount_t *mp)
{
int error;
xfs_trans_t *tp;

mp->m_qflags = 0;
/*
* It is OK to look at sb_qflags here in mount path,
* without m_sb_lock.
*/
if (mp->m_sb.sb_qflags == 0)
return 0;
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_qflags = 0;
spin_unlock(&mp->m_sb_lock);

/*
* if the fs is readonly, let the incore superblock run
* with quotas off but don't flush the update out to disk
*/
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
#ifdef QUOTADEBUG
xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT))) {
xfs_trans_cancel(tp, 0);
xfs_fs_cmn_err(CE_ALERT, mp,
"xfs_mount_reset_sbqflags: Superblock update failed!");
return error;
}
xfs_mod_sb(tp, XFS_SB_QFLAGS);
error = xfs_trans_commit(tp, 0);
return error;
}

STATIC int
xfs_noquota_init(
xfs_mount_t *mp,
uint *needquotamount,
uint *quotaflags)
{
int error = 0;

*quotaflags = 0;
*needquotamount = B_FALSE;

ASSERT(!XFS_IS_QUOTA_ON(mp));

/*
* If a file system had quotas running earlier, but decided to
* mount without -o uquota/pquota/gquota options, revoke the
* quotachecked license.
*/
if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
cmn_err(CE_NOTE,
"XFS resetting qflags for filesystem %s",
mp->m_fsname);

error = xfs_mount_reset_sbqflags(mp);
}
return error;
}

static struct xfs_qmops xfs_qmcore_stub = {
.xfs_qminit = (xfs_qminit_t) xfs_noquota_init,
.xfs_qmdone = (xfs_qmdone_t) fs_noerr,
.xfs_qmmount = (xfs_qmmount_t) fs_noerr,
.xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
.xfs_dqrele = (xfs_dqrele_t) fs_noerr,
.xfs_dqattach = (xfs_dqattach_t) fs_noerr,
.xfs_dqdetach = (xfs_dqdetach_t) fs_noerr,
.xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr,
.xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr,
.xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr,
.xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr,
.xfs_dqvopchown = xfs_dqvopchown_default,
.xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
.xfs_dqstatvfs = (xfs_dqstatvfs_t) fs_noval,
.xfs_dqsync = (xfs_dqsync_t) fs_noerr,
};

int
xfs_qmops_get(struct xfs_mount *mp)
{
if (XFS_IS_QUOTA_RUNNING(mp)) {
#ifdef CONFIG_XFS_QUOTA
mp->m_qm_ops = &xfs_qmcore_xfs;
#else
cmn_err(CE_WARN,
"XFS: qouta support not available in this kernel.");
return EINVAL;
#endif
} else {
mp->m_qm_ops = &xfs_qmcore_stub;
}

return 0;
}

void
xfs_qmops_put(struct xfs_mount *mp)
{
}

@ -197,7 +197,6 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */
#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */

@ -302,69 +301,79 @@ typedef struct xfs_dqtrx {
long qt_delrtb_delta; /* delayed RT blk count changes */
} xfs_dqtrx_t;

/*
* Dquot transaction functions, used if quota is enabled.
*/
typedef void (*qo_dup_dqinfo_t)(struct xfs_trans *, struct xfs_trans *);
typedef void (*qo_mod_dquot_byino_t)(struct xfs_trans *,
struct xfs_inode *, uint, long);
typedef void (*qo_free_dqinfo_t)(struct xfs_trans *);
typedef void (*qo_apply_dquot_deltas_t)(struct xfs_trans *);
typedef void (*qo_unreserve_and_mod_dquots_t)(struct xfs_trans *);
typedef int (*qo_reserve_quota_nblks_t)(
struct xfs_trans *, struct xfs_mount *,
struct xfs_inode *, long, long, uint);
typedef int (*qo_reserve_quota_bydquots_t)(
struct xfs_trans *, struct xfs_mount *,
struct xfs_dquot *, struct xfs_dquot *,
long, long, uint);
typedef struct xfs_dqtrxops {
qo_dup_dqinfo_t qo_dup_dqinfo;
qo_free_dqinfo_t qo_free_dqinfo;
qo_mod_dquot_byino_t qo_mod_dquot_byino;
qo_apply_dquot_deltas_t qo_apply_dquot_deltas;
qo_reserve_quota_nblks_t qo_reserve_quota_nblks;
qo_reserve_quota_bydquots_t qo_reserve_quota_bydquots;
qo_unreserve_and_mod_dquots_t qo_unreserve_and_mod_dquots;
} xfs_dqtrxops_t;
#ifdef CONFIG_XFS_QUOTA
extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
uint, long);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
struct xfs_inode *, long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, long, long, uint);

#define XFS_DQTRXOP(mp, tp, op, args...) \
((mp)->m_qm_ops->xfs_dqtrxops ? \
((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : 0)
extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *, uint);
extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
extern void xfs_qm_dqdetach(struct xfs_inode *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
extern int xfs_qm_sync(struct xfs_mount *, int);
extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
extern void xfs_qm_unmount_quotas(struct xfs_mount *);

#define XFS_DQTRXOP_VOID(mp, tp, op, args...) \
((mp)->m_qm_ops->xfs_dqtrxops ? \
((mp)->m_qm_ops->xfs_dqtrxops->op)(tp, ## args) : (void)0)
#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp)
{
*udqp = NULL;
*gdqp = NULL;
return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
#define xfs_trans_free_dqinfo(tp)
#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
#define xfs_trans_apply_dquot_deltas(tp)
#define xfs_trans_unreserve_and_mod_dquots(tp)
#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0)
#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0)
#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
#define xfs_qm_vop_rename_dqattach(it) (0)
#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl) (0)
#define xfs_qm_dqattach(ip, fl) (0)
#define xfs_qm_dqattach_locked(ip, fl) (0)
#define xfs_qm_dqdetach(ip)
#define xfs_qm_dqrele(d)
#define xfs_qm_statvfs(ip, s)
#define xfs_qm_sync(mp, fl) (0)
#define xfs_qm_newmount(mp, a, b) (0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
#define xfs_qm_unmount_quotas(mp) (0)
#endif /* CONFIG_XFS_QUOTA */

#define XFS_TRANS_DUP_DQINFO(mp, otp, ntp) \
XFS_DQTRXOP_VOID(mp, otp, qo_dup_dqinfo, ntp)
#define XFS_TRANS_FREE_DQINFO(mp, tp) \
XFS_DQTRXOP_VOID(mp, tp, qo_free_dqinfo)
#define XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, field, delta) \
XFS_DQTRXOP_VOID(mp, tp, qo_mod_dquot_byino, ip, field, delta)
#define XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp) \
XFS_DQTRXOP_VOID(mp, tp, qo_apply_dquot_deltas)
#define XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, fl) \
XFS_DQTRXOP(mp, tp, qo_reserve_quota_nblks, mp, ip, nblks, ninos, fl)
#define XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, fl) \
XFS_DQTRXOP(mp, tp, qo_reserve_quota_bydquots, mp, ud, gd, nb, ni, fl)
#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \
XFS_DQTRXOP_VOID(mp, tp, qo_unreserve_and_mod_dquots)

#define XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, flags) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), -(ninos), flags)
#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_UNRESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, -(nb), -(ni), \
#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
#define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \
xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)

extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);

extern struct xfs_qmops xfs_qmcore_xfs;

#endif /* __KERNEL__ */

#endif /* __XFS_QUOTA_H__ */
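The header change above is the heart of the cleanup: the xfs_qmops/xfs_dqtrxops indirection tables are gone, callers invoke the quota functions directly, and a CONFIG_XFS_QUOTA=n build gets no-op stubs so the call sites still compile. A stand-alone illustration of that direct-call-or-stub pattern; QUOTA_ENABLED and quota_reserve() are invented for the example, standing in for CONFIG_XFS_QUOTA and the real xfs_trans_reserve_quota_nblks():

    #include <stdio.h>

    #ifdef QUOTA_ENABLED
    static int quota_reserve(long nblks)
    {
    	printf("reserving %ld blocks against the quota\n", nblks);
    	return 0;
    }
    #else
    /* With quota support compiled out, the call site stays the same and the
     * expression simply evaluates to "no error". */
    #define quota_reserve(nblks)	(0)
    #endif

    int main(void)
    {
    	/* Callers need neither an ops table nor a runtime indirection. */
    	if (quota_reserve(128) != 0)
    		fprintf(stderr, "reservation failed\n");
    	return 0;
    }

Built with -DQUOTA_ENABLED the call reaches the real function; without it the macro makes the reservation a constant zero, which is why the rest of this commit can replace every XFS_TRANS_* and XFS_QM_* macro with a plain function call.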
@ -166,7 +166,8 @@ xfs_rename(
/*
* Attach the dquots to the inodes
*/
if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
error = xfs_qm_vop_rename_dqattach(inodes);
if (error) {
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
}

@ -41,7 +41,6 @@
#include "xfs_ialloc.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_rw.h"

@ -297,7 +297,7 @@ xfs_trans_dup(
tp->t_rtx_res = tp->t_rtx_res_used;
ntp->t_pflags = tp->t_pflags;

XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
xfs_trans_dup_dqinfo(tp, ntp);

atomic_inc(&tp->t_mountp->m_active_trans);
return ntp;

@ -829,7 +829,7 @@ _xfs_trans_commit(
* means is that we have some (non-persistent) quota
* reservations that need to be unreserved.
*/
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
xfs_trans_unreserve_and_mod_dquots(tp);
if (tp->t_ticket) {
commit_lsn = xfs_log_done(mp, tp->t_ticket,
NULL, log_flags);

@ -848,10 +848,9 @@ _xfs_trans_commit(
/*
* If we need to update the superblock, then do it now.
*/
if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
if (tp->t_flags & XFS_TRANS_SB_DIRTY)
xfs_trans_apply_sb_deltas(tp);
}
XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
xfs_trans_apply_dquot_deltas(tp);

/*
* Ask each log item how many log_vector entries it will

@ -1056,7 +1055,7 @@ xfs_trans_uncommit(
}

xfs_trans_unreserve_and_mod_sb(tp);
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
xfs_trans_unreserve_and_mod_dquots(tp);

xfs_trans_free_items(tp, flags);
xfs_trans_free_busy(tp);

@ -1181,7 +1180,7 @@ xfs_trans_cancel(
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
xfs_trans_unreserve_and_mod_dquots(tp);

if (tp->t_ticket) {
if (flags & XFS_TRANS_RELEASE_LOG_RES) {

@ -1211,7 +1210,7 @@ xfs_trans_free(
xfs_trans_t *tp)
{
atomic_dec(&tp->t_mountp->m_active_trans);
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
xfs_trans_free_dqinfo(tp);
kmem_zone_free(xfs_trans_zone, tp);
}


@ -166,7 +166,7 @@ xfs_dir_ialloc(
xfs_buf_relse(ialloc_context);
if (dqinfo) {
tp->t_dqinfo = dqinfo;
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
xfs_trans_free_dqinfo(tp);
}
*tpp = ntp;
*ipp = NULL;

@ -42,6 +42,7 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_rw.h"
#include "xfs_error.h"

@ -118,7 +119,7 @@ xfs_setattr(
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, ip->i_d.di_projid,
code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
qflags, &udqp, &gdqp);
if (code)
return code;

@ -180,10 +181,11 @@ xfs_setattr(
* Do a quota reservation only if uid/gid is actually
* going to change.
*/
if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
(XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
if (XFS_IS_QUOTA_RUNNING(mp) &&
((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
(XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
ASSERT(tp);
code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */

@ -217,7 +219,7 @@ xfs_setattr(
/*
* Make sure that the dquots are attached to the inode.
*/
code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
code = xfs_qm_dqattach_locked(ip, 0);
if (code)
goto error_return;

@ -351,21 +353,21 @@ xfs_setattr(
* in the transaction.
*/
if (iuid != uid) {
if (XFS_IS_UQUOTA_ON(mp)) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(mask & ATTR_UID);
ASSERT(udqp);
olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
olddquot1 = xfs_qm_vop_chown(tp, ip,
&ip->i_udquot, udqp);
}
ip->i_d.di_uid = uid;
inode->i_uid = uid;
}
if (igid != gid) {
if (XFS_IS_GQUOTA_ON(mp)) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(!XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & ATTR_GID);
ASSERT(gdqp);
olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
olddquot2 = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp);
}
ip->i_d.di_gid = gid;

@ -461,13 +463,25 @@ xfs_setattr(
/*
* Release any dquot(s) the inode had kept before chown.
*/
XFS_QM_DQRELE(mp, olddquot1);
XFS_QM_DQRELE(mp, olddquot2);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(olddquot1);
xfs_qm_dqrele(olddquot2);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);

if (code) {
if (code)
return code;

/*
* XXX(hch): Updating the ACL entries is not atomic vs the i_mode
* update. We could avoid this with linked transactions
* and passing down the transaction pointer all the way
* to attr_set. No previous user of the generic
* Posix ACL code seems to care about this issue either.
*/
if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
code = -xfs_acl_chmod(inode);
if (code)
return XFS_ERROR(code);
}

if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&

@ -482,8 +496,8 @@ xfs_setattr(
commit_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
error_return:
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
if (tp) {
xfs_trans_cancel(tp, commit_flags);
}

@ -739,7 +753,8 @@ xfs_free_eofblocks(
/*
* Attach the dquots to the inode up front.
*/
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;

/*

@ -1181,7 +1196,8 @@ xfs_inactive(

ASSERT(ip->i_d.di_nlink == 0);

if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
error = xfs_qm_dqattach(ip, 0);
if (error)
return VN_INACTIVE_CACHE;

tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

@ -1307,7 +1323,7 @@ xfs_inactive(
/*
* Credit the quota account(s). The inode is gone.
*/
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

/*
* Just ignore errors at this point. There is nothing we can

@ -1323,11 +1339,11 @@ xfs_inactive(
xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
"xfs_trans_commit() returned error %d", error);
}

/*
* Release the dquots held by inode, if any.
*/
XFS_QM_DQDETACH(mp, ip);

xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);

out:

@ -1427,8 +1443,7 @@ xfs_create(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
current_fsuid(), current_fsgid(), prid,
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;

@ -1489,7 +1504,7 @@ xfs_create(
/*
* Reserve disk quota and the inode.
*/
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
if (error)
goto out_trans_cancel;

@ -1561,7 +1576,7 @@ xfs_create(
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);

/*
* xfs_trans_commit normally decrements the vnode ref count

@ -1580,8 +1595,8 @@ xfs_create(
goto out_dqrele;
}

XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);

*ipp = ip;

@ -1602,8 +1617,8 @@ xfs_create(
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
out_dqrele:
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);

if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);

@ -1837,11 +1852,11 @@ xfs_remove(
return error;
}

error = XFS_QM_DQATTACH(mp, dp, 0);
error = xfs_qm_dqattach(dp, 0);
if (error)
goto std_return;

error = XFS_QM_DQATTACH(mp, ip, 0);
error = xfs_qm_dqattach(ip, 0);
if (error)
goto std_return;

@ -2028,11 +2043,11 @@ xfs_link(

/* Return through std_return after this point. */

error = XFS_QM_DQATTACH(mp, sip, 0);
error = xfs_qm_dqattach(sip, 0);
if (error)
goto std_return;

error = XFS_QM_DQATTACH(mp, tdp, 0);
error = xfs_qm_dqattach(tdp, 0);
if (error)
goto std_return;

@ -2205,8 +2220,7 @@ xfs_symlink(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
current_fsuid(), current_fsgid(), prid,
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;

@ -2248,7 +2262,7 @@ xfs_symlink(
/*
* Reserve disk quota : blocks and inode.
*/
error = XFS_TRANS_RESERVE_QUOTA(mp, tp, udqp, gdqp, resblks, 1, 0);
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
if (error)
goto error_return;

@ -2288,7 +2302,7 @@ xfs_symlink(
/*
* Also attach the dquot(s) to it, if applicable.
*/
XFS_QM_DQVOPCREATE(mp, tp, ip, udqp, gdqp);
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);

if (resblks)
resblks -= XFS_IALLOC_SPACE_RES(mp);

@ -2376,8 +2390,8 @@ xfs_symlink(
goto error2;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);

/* Fall through to std_return with error = 0 or errno from
* xfs_trans_commit */

@ -2401,8 +2415,8 @@ xfs_symlink(
cancel_flags |= XFS_TRANS_ABORT;
error_return:
xfs_trans_cancel(tp, cancel_flags);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);

if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);

@ -2541,7 +2555,8 @@ xfs_alloc_file_space(
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);

if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;

if (len <= 0)

@ -2628,8 +2643,8 @@ xfs_alloc_file_space(
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
qblocks, 0, quota_flag);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
0, quota_flag);
if (error)
goto error1;

@ -2688,7 +2703,7 @@ xfs_alloc_file_space(

error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
xfs_bmap_cancel(&free_list);
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);

@ -2827,7 +2842,8 @@ xfs_free_file_space(

xfs_itrace_entry(ip);

if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;

error = 0;

@ -2953,9 +2969,9 @@ xfs_free_file_space(
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
ip->i_udquot, ip->i_gdquot, resblks, 0,
XFS_QMOPT_RES_REGBLKS);
error = xfs_trans_reserve_quota(tp, mp,
ip->i_udquot, ip->i_gdquot,
resblks, 0, XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;


@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
#define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */
#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */

int xfs_readlink(struct xfs_inode *ip, char *link);
int xfs_fsync(struct xfs_inode *ip);