a79f194aa4
If a layout segment gets invalidated while a pNFS I/O operation is queued for transmission, then we ideally want to abort immediately. This is particularly the case when there is a large number of I/O-related RPCs queued in the RPC layer, and the layout segment gets invalidated due to an ENOSPC error, or an EACCES (because the client was fenced). We may otherwise be forced to spam the MDS with a lot of unnecessary LAYOUTERRORs after that I/O fails.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
		struct nfs42_layoutstat_devinfo *devinfo,
		int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

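/*
 * Add a mirror to the layout's global mirror list, or return an existing
 * entry (with an elevated refcount) that matches on both deviceid and
 * filehandle set, so identical mirrors are shared between segments.
 */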
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

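/*
 * Ordering predicate handed to pnfs_generic_layout_insert_lseg(): lsegs
 * are grouped by iomode first, and otherwise ordered by their range
 * offsets and ends.
 */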
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

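/*
 * Sort the mirror array by descending efficiency with a simple O(n^2)
 * selection pass; mirror counts are capped at
 * NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT, so this is cheap.
 */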
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

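/*
 * Decode an ff_layout4 from the LAYOUTGET reply: stripe unit and mirror
 * count, then for each mirror a data-server count, deviceid, efficiency,
 * stateid, a list of filehandle versions and the synthetic user/group
 * names, followed by the optional flags and stats report interval.
 */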
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(NULL);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(NULL);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

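/*
 * Start the busy timer for this I/O and decide whether it is time to
 * send a LAYOUTSTATS report. The server-supplied per-mirror
 * report_interval takes precedence over the layoutstats_timer tunable;
 * both look to be in seconds here, hence the conversion to milliseconds.
 */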
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
	    report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(&cinfo->inode->i_lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(&cinfo->inode->i_lock);
		return 0;
	}
}

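/*
 * Walk the efficiency-sorted mirror array starting at start_idx and
 * return the first data server that can be set up; only the last
 * candidate is allowed to treat a connection failure as final
 * (fail_return).
 */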
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

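/*
 * If no usable DS is found and the layout forbids falling back to the
 * MDS, drop the lseg, wait a second, and retry from scratch; otherwise
 * redirect the pageio descriptor to the MDS.
 */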
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	pnfs_generic_pg_check_layout(pgio);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_nolseg:
	if (pgio->pg_error < 0)
		return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	pnfs_generic_pg_check_layout(pgio);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	case -EAGAIN:
		return -NFS4ERR_RESET_TO_PNFS;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
	case -EAGAIN:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

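/*
 * Dispatch to the v3/v4 handler. Returns 0 if there is nothing to do,
 * -NFS4ERR_RESET_TO_PNFS or -NFS4ERR_RESET_TO_MDS to have the caller
 * resend the I/O, or -EAGAIN to restart the RPC call itself.
 */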
static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0)
		return 0;

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

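/*
 * Record a DS error for a later LAYOUTERROR/LAYOUTRETURN: local errnos
 * are first mapped onto NFS4ERR_NXIO/NFS4ERR_ACCESS, DELAY and GRACE
 * are ignored, and the layout is marked for return.
 */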
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout clients should treat a FILE_SYNC reply from a DS as DATA_SYNC,
 * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

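/*
 * Cancel an I/O RPC that is still sitting in the transmit queue if its
 * layout segment has been invalidated (e.g. by ENOSPC or fencing), so
 * that we fail fast instead of flooding the MDS with LAYOUTERRORs.
 */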
static void
ff_layout_io_prepare_transmit(struct rpc_task *task,
		void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (!pnfs_is_valid_lseg(hdr->lseg))
		rpc_exit(task, -EAGAIN);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
		pnfs_read_resend_pnfs(hdr);
	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_read(hdr);
	pnfs_generic_rw_release(data);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care about DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
		ff_layout_reset_write(hdr, true);
	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_write(hdr, false);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (nfs4_setup_sequence(wdata->ds_clp,
				&wdata->args.seq_args,
				&wdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

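/*
 * Set up and fire an asynchronous READ to the data server for the
 * selected mirror: pick the DS, its rpc_clnt and credential, then swap
 * in the DS filehandle and (for v4) the DS stateid before dispatch.
 */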
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	if (vers == 4 &&
	    !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
		goto out_failed;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	if (vers == 4 &&
	    !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
		goto out_failed;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

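/*
 * Send a COMMIT to the data server backing the given commit bucket; on
 * any setup failure the writes are re-marked for resend and the commit
 * data released, with -EAGAIN telling the caller to retry.
 */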
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	const struct cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
		goto out_err;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	refcount_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
|
|
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
|
|
{
|
|
WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
|
|
}
|
|
|
|
static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
				const nfs4_stateid *stateid,
				const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			   const nfs4_stateid *stateid,
			   const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
					 devinfo->ld_private.data);
}

/* Encode the per-device iostats that accompany this layoutreturn */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
					   const struct nfs4_layoutreturn_args *args,
					   struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
					   &args->layout->plh_stateid,
					   &ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
			     unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

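/*
 * The layout-specific LAYOUTRETURN payload is encoded in two passes:
 * the ioerr and iostats arrays are first serialized into a scratch
 * page via a temporary xdr_stream, and the result is then appended to
 * the real stream as a length-prefixed blob.
 */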
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
			      const void *voidargs,
			      const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

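/*
 * Gather everything the LAYOUTRETURN encoder will need: a scratch
 * page, up to FF_LAYOUTRETURN_MAXERR queued DS errors for the returned
 * range, and a snapshot of the per-mirror layoutstats (taken under the
 * inode lock).  The result is attached to the args as ld_private data.
 */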
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
						       &args->range, &ff_args->errors,
						       FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
							  &ff_args->devinfo[0],
							  ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

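/*
 * Encode a netaddr4: the netid ("tcp" or "tcp6") followed by the
 * universal address, i.e. the presentation address with the port
 * appended as two dot-separated decimal octets.
 */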
/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

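/* Encode an nfstime4: 64-bit seconds followed by 32-bit nanoseconds */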
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

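/*
 * Encode one ff_io_latency4: five 64-bit counters (ops and bytes
 * requested, ops and bytes completed, bytes not delivered) followed by
 * the busy time and aggregate completion time as nfstime4 values.
 */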
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

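/*
 * Encode the body of a layoutupdate for one mirror: the data server's
 * netaddr and filehandle, the read and write latency stats (sampled
 * under the mirror lock), the time elapsed since mirror->start_time,
 * and a final boolean that is always false here.
 */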
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
				 const struct nfs42_layoutstat_devinfo *devinfo,
				 struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free = ff_layout_free_layoutstats,
};

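/*
 * Fill up to @dev_limit devinfo entries from the layout's mirror list.
 * Only mirrors with a valid deviceid and fresh statistics (the
 * NFS4_FF_MIRROR_STAT_AVAIL bit) are reported; each selected mirror
 * gains a reference that is dropped again via the layoutstat_ops free
 * callback.  Both callers hold the inode's i_lock across this walk.
 */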
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!refcount_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

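/*
 * Allocate the devinfo array for a LAYOUTSTATS call and populate it
 * from the current mirrors.  Returns -ENOENT when no mirror has
 * anything to report, so the caller can skip sending the RPC.
 */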
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
						       &args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
			   const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
	return 0;
}

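/*
 * The flex files layout driver operations vector.  Commit bookkeeping
 * is delegated to the generic pNFS helpers; the driver-specific entry
 * points above supply DS selection, XDR encoding, and layoutstats
 * support.
 */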
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id = LAYOUT_FLEX_FILES,
	.name = "LAYOUT_FLEX_FILES",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTGET_ON_OPEN,
	.max_layoutget_response = 4096, /* 1 page or so... */
	.set_layoutdriver = ff_layout_set_layoutdriver,
	.alloc_layout_hdr = ff_layout_alloc_layout_hdr,
	.free_layout_hdr = ff_layout_free_layout_hdr,
	.alloc_lseg = ff_layout_alloc_lseg,
	.free_lseg = ff_layout_free_lseg,
	.add_lseg = ff_layout_add_lseg,
	.pg_read_ops = &ff_layout_pg_read_ops,
	.pg_write_ops = &ff_layout_pg_write_ops,
	.get_ds_info = ff_layout_get_ds_info,
	.free_deviceid_node = ff_layout_free_deviceid_node,
	.mark_request_commit = pnfs_layout_mark_request_commit,
	.clear_request_commit = pnfs_generic_clear_request_commit,
	.scan_commit_lists = pnfs_generic_scan_commit_lists,
	.recover_commit_reqs = pnfs_generic_recover_commit_reqs,
	.commit_pagelist = ff_layout_commit_pagelist,
	.read_pagelist = ff_layout_read_pagelist,
	.write_pagelist = ff_layout_write_pagelist,
	.alloc_deviceid_node = ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn = ff_layout_prepare_layoutreturn,
	.sync = pnfs_nfs_generic_sync,
	.prepare_layoutstats = ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);