staging/lustre/llite: merge lclient.h into llite/vvp_internal.h

Move the definition of struct cl_client_cache to lustre/include/cl_object.h
and move the rest of lustre/include/lclient.h into lustre/llite/vvp_internal.h.

Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/12592
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 902a34ad72
commit 0d345656ea
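For orientation, the sketch below is illustrative only and is not part of this commit: the struct mirrors the cl_client_cache definition that this patch moves into cl_object.h, while the cl_client_cache_setup() helper is hypothetical and merely shows how such fields are typically initialized on the client.

/*
 * Illustrative sketch only -- not part of this commit. The struct mirrors
 * the cl_client_cache definition added to cl_object.h by this patch; the
 * cl_client_cache_setup() helper is a hypothetical example of field
 * initialization.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct cl_client_cache {
	atomic_t	 ccc_users;	    /* # of users (OSCs) */
	unsigned int	 ccc_lru_shrinkers; /* # of threads doing shrinking */
	atomic_t	 ccc_lru_left;	    /* # of LRU entries available */
	struct list_head ccc_lru;	    /* list of entities (OSCs) in this LRU */
	unsigned long	 ccc_lru_max;	    /* max # of LRU entries */
	spinlock_t	 ccc_lru_lock;	    /* protects ccc_lru */
};

static void cl_client_cache_setup(struct cl_client_cache *cache,
				  unsigned long lru_max)
{
	/* No OSC users yet; each OSC registers itself later. */
	atomic_set(&cache->ccc_users, 0);
	cache->ccc_lru_shrinkers = 0;
	cache->ccc_lru_max = lru_max;
	atomic_set(&cache->ccc_lru_left, (int)lru_max);
	INIT_LIST_HEAD(&cache->ccc_lru);
	spin_lock_init(&cache->ccc_lru_lock);
}

An OSC joining the cache would then typically atomic_inc(&cache->ccc_users) and link itself onto ccc_lru under ccc_lru_lock.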
@@ -97,9 +97,12 @@
 * super-class definitions.
 */
#include "lu_object.h"
#include <linux/atomic.h>
#include "linux/lustre_compat25.h"
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct inode;

@@ -2317,6 +2320,39 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
			 const struct cl_lock_descr *descr);
/* @} helper */

/**
 * Data structure managing a client's cached pages. A count of
 * "unstable" pages is maintained, and an LRU of clean pages is
 * maintained. "unstable" pages are pages pinned by the ptlrpc
 * layer for recovery purposes.
 */
struct cl_client_cache {
	/**
	 * # of users (OSCs)
	 */
	atomic_t	ccc_users;
	/**
	 * # of threads are doing shrinking
	 */
	unsigned int	ccc_lru_shrinkers;
	/**
	 * # of LRU entries available
	 */
	atomic_t	ccc_lru_left;
	/**
	 * List of entities(OSCs) for this LRU cache
	 */
	struct list_head ccc_lru;
	/**
	 * Max # of LRU entries
	 */
	unsigned long	ccc_lru_max;
	/**
	 * Lock to protect ccc_lru list
	 */
	spinlock_t	ccc_lru_lock;
};

/** @} cl_page */

/** \defgroup cl_lock cl_lock
@@ -1,409 +0,0 @@
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Definitions shared between vvp and liblustre, and other clients in the
 * future.
 *
 * Author: Oleg Drokin <oleg.drokin@sun.com>
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LCLIENT_H
#define LCLIENT_H

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}

/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};

/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iov_iter *cui_iter;
	/**
	 * Total size for the left IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
		struct {
			struct cl_page_list cui_queue;
			unsigned long cui_written;
			int cui_from;
			int cui_to;
		} write;
	} u;
	/**
	 * Layout version when this IO is initialized
	 */
	__u32 cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb *cui_iocb;
};

/**
 * True, if \a io is a normal io, False for splice_{read,write}.
 * must be implemented in arch specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock cti_lock;
	struct cl_lock_descr cti_descr;
	struct cl_io cti_io;
	struct cl_attr cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info);
	return info;
}

static inline struct cl_lock *ccc_env_lock(const struct lu_env *env)
{
	struct cl_lock *lock = &ccc_env_info(env)->cti_lock;

	memset(lock, 0, sizeof(*lock));
	return lock;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses);
	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}

/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header cob_header;
	struct cl_object cob_cl;
	struct inode *cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head cob_pending_list;

	/**
	 * Access this counter is protected by inode->i_sem. Now that
	 * the lifetime of transient pages must be covered by inode sem,
	 * we don't need to hold any lock..
	 */
	int cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t cob_mmap_cnt;

	/**
	 * various flags
	 * cob_discard_page_warned
	 * if pages belonging to this object are discarded when a client
	 * is evicted, some debug info will be printed, this flag will be set
	 * during processing the first discarded page, then avoid flooding
	 * debug message for lots of discarded pages.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice cpg_cl;
	int cpg_defer_uptodate;
	int cpg_ra_used;
	int cpg_write_queued;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head cpg_pending_linkage;
	/** VM page */
	struct page *cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}

static inline pgoff_t ccc_index(struct ccc_page *ccc)
{
	return ccc->cpg_cl.cpl_index;
}

struct ccc_device {
	struct cl_device cdv_cl;
	struct super_block *cdv_sb;
	struct cl_device *cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};

void *ccc_key_init (const struct lu_context *ctx,
		    struct lu_context_key *key);
void ccc_key_fini (const struct lu_context *ctx,
		   struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data);

int ccc_device_init (const struct lu_env *env,
		     struct lu_device *d,
		     const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini (const struct lu_env *env,
				   struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free (const struct lu_env *env,
				   struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, struct cl_sync_io *anchor);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, u64 flags);

struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
struct lu_object *ccc2lu (struct ccc_object *vob);
struct ccc_device *lu2ccc_dev (const struct lu_device *d);
struct ccc_device *cl2ccc_dev (const struct cl_device *d);
struct ccc_object *lu2ccc (const struct lu_object *obj);
struct ccc_object *cl2ccc (const struct cl_object *obj);
struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
struct ccc_io *cl2ccc_io (const struct lu_env *env,
			  const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
struct page *cl2vm_page (const struct cl_page_slice *slice);
struct inode *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc (struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

# define CLOBINVRNT(env, clob, expr) \
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env *cg_env;
	struct cl_io *cg_io;
	struct cl_lock *cg_lock;
	unsigned long cg_gid;
};

int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);

/**
 * New interfaces to get and put lov_stripe_md from lov layer. This violates
 * layering because lov_stripe_md is supposed to be a private data in lov.
 *
 * NB: If you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);

/**
 * Data structure managing a client's cached clean pages. An LRU of
 * pages is maintained, along with other statistics.
 */
struct cl_client_cache {
	atomic_t ccc_users; /* # of users (OSCs) of this data */
	struct list_head ccc_lru; /* LRU list of cached clean pages */
	spinlock_t ccc_lru_lock; /* lock for list */
	atomic_t ccc_lru_left; /* # of LRU entries available */
	unsigned long ccc_lru_max; /* Max # of LRU entries possible */
	unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
};

#endif /*LCLIENT_H */
@@ -52,7 +52,6 @@
#include <linux/file.h>

#include "../include/cl_object.h"
#include "../include/lclient.h"
#include "../llite/llite_internal.h"

static const struct cl_lock_descr whole_file = {
@@ -59,8 +59,6 @@
#include "../include/lustre_mdc.h"
#include "../include/cl_object.h"

#include "../include/lclient.h"

#include "../llite/llite_internal.h"

static const struct cl_req_operations ccc_req_ops;
@@ -41,8 +41,8 @@
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/cl_object.h"
#include "../include/lclient.h"

#include "vvp_internal.h"
#include "../include/lustre_lite.h"

/* Initialize the default and maximum LOV EA and cookie sizes. This allows
@@ -43,11 +43,11 @@

/* for struct cl_lock_descr and struct cl_io */
#include "../include/cl_object.h"
#include "../include/lclient.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_intent.h"
#include <linux/compat.h>
#include <linux/posix_acl_xattr.h>
#include "vvp_internal.h"

#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -41,8 +41,374 @@
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H

#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"
#include "llite_internal.h"

enum obd_notify_event;
struct inode;
struct lov_stripe_md;
struct lustre_md;
struct obd_capa;
struct obd_device;
struct obd_export;
struct page;

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}

/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};

/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iov_iter *cui_iter;
	/**
	 * Total size for the left IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
		struct {
			struct cl_page_list cui_queue;
			unsigned long cui_written;
			int cui_from;
			int cui_to;
		} write;
	} u;
	/**
	 * Layout version when this IO is initialized
	 */
	__u32 cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb *cui_iocb;
};

/**
 * True, if \a io is a normal io, False for other splice_{read,write}.
 * must be implemented in arch specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock cti_lock;
	struct cl_lock_descr cti_descr;
	struct cl_io cti_io;
	struct cl_attr cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info);

	return info;
}

static inline struct cl_lock *ccc_env_lock(const struct lu_env *env)
{
	struct cl_lock *lock = &ccc_env_info(env)->cti_lock;

	memset(lock, 0, sizeof(*lock));
	return lock;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));

	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));

	return io;
}

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses);

	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}

/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header cob_header;
	struct cl_object cob_cl;
	struct inode *cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head cob_pending_list;

	/**
	 * Access this counter is protected by inode->i_sem. Now that
	 * the lifetime of transient pages must be covered by inode sem,
	 * we don't need to hold any lock..
	 */
	int cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t cob_mmap_cnt;

	/**
	 * various flags
	 * cob_discard_page_warned
	 * if pages belonging to this object are discarded when a client
	 * is evicted, some debug info will be printed, this flag will be set
	 * during processing the first discarded page, then avoid flooding
	 * debug message for lots of discarded pages.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice cpg_cl;
	int cpg_defer_uptodate;
	int cpg_ra_used;
	int cpg_write_queued;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head cpg_pending_linkage;
	/** VM page */
	struct page *cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}

static inline pgoff_t ccc_index(struct ccc_page *ccc)
{
	return ccc->cpg_cl.cpl_index;
}

struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);

struct ccc_device {
	struct cl_device cdv_cl;
	struct super_block *cdv_sb;
	struct cl_device *cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
		   struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data);

int ccc_device_init(const struct lu_env *env,
		    struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, struct cl_sync_io *anchor);

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, u64 flags);

struct lu_device *ccc2lu_dev(struct ccc_device *vdv);
struct lu_object *ccc2lu(struct ccc_object *vob);
struct ccc_device *lu2ccc_dev(const struct lu_device *d);
struct ccc_device *cl2ccc_dev(const struct cl_device *d);
struct ccc_object *lu2ccc(const struct lu_object *obj);
struct ccc_object *cl2ccc(const struct cl_object *obj);
struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice);
struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
struct page *cl2vm_page(const struct cl_page_slice *slice);
struct inode *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc(struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

# define CLOBINVRNT(env, clob, expr) \
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env *cg_env;
	struct cl_io *cg_io;
	struct cl_lock *cg_lock;
	unsigned long cg_gid;
};

int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);

/**
 * New interfaces to get and put lov_stripe_md from lov layer. This violates
 * layering because lov_stripe_md is supposed to be a private data in lov.
 *
 * NB: If you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);

int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
@@ -44,6 +44,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
@@ -45,6 +45,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
@@ -44,6 +44,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
@@ -54,7 +54,6 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/cl_object.h"
#include "../include/lclient.h" /* for cl_client_lru */
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre_fid.h"

@@ -51,7 +51,6 @@
#include "../include/obd.h"
/* osc_build_res_name() */
#include "../include/cl_object.h"
#include "../include/lclient.h"
#include "osc_internal.h"

/** \defgroup osc osc