2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* linux/fs/nfs/pagelist.c
|
|
|
|
*
|
|
|
|
* A set of helper functions for managing NFS read and write requests.
|
|
|
|
* The main purpose of these routines is to provide support for the
|
|
|
|
* coalescing of several requests into a single RPC call.
|
|
|
|
*
|
|
|
|
* Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/file.h>
|
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 04:22:52 +07:00
|
|
|
#include <linux/sched.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/sunrpc/clnt.h>
|
2012-01-18 10:04:24 +07:00
|
|
|
#include <linux/nfs.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/nfs3.h>
|
|
|
|
#include <linux/nfs4.h>
|
|
|
|
#include <linux/nfs_page.h>
|
|
|
|
#include <linux/nfs_fs.h>
|
|
|
|
#include <linux/nfs_mount.h>
|
2011-05-27 03:00:52 +07:00
|
|
|
#include <linux/export.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-04-10 20:26:35 +07:00
|
|
|
#include "internal.h"
|
2011-03-01 08:34:15 +07:00
|
|
|
#include "pnfs.h"
|
2007-04-10 20:26:35 +07:00
|
|
|
|
2014-05-06 20:12:32 +07:00
|
|
|
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
|
|
|
|
|
2006-12-07 11:33:20 +07:00
|
|
|
static struct kmem_cache *nfs_page_cachep;
|
2014-05-06 20:12:36 +07:00
|
|
|
static const struct rpc_call_ops nfs_pgio_common_ops;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-06 20:12:29 +07:00
|
|
|
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
|
2012-04-21 01:47:45 +07:00
|
|
|
{
|
|
|
|
p->npages = pagecount;
|
|
|
|
if (pagecount <= ARRAY_SIZE(p->page_array))
|
|
|
|
p->pagevec = p->page_array;
|
|
|
|
else {
|
|
|
|
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
|
|
|
|
if (!p->pagevec)
|
|
|
|
p->npages = 0;
|
|
|
|
}
|
|
|
|
return p->pagevec != NULL;
|
|
|
|
}
|
|
|
|
|
2012-04-21 01:47:46 +07:00
|
|
|
/*
 * nfs_pgheader_init - initialise an I/O header from a pageio descriptor
 * @desc: descriptor whose queued requests the header will cover
 * @hdr: header to initialise
 * @release: callback invoked when the header is released
 *
 * Copies the per-I/O state (first request, inode, credential, byte range,
 * direct-I/O and layout context) from @desc into @hdr and gives the
 * completion ops a chance to do their own per-header setup.
 */
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	/* credential comes from the open context of the first request */
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	/* assume all bytes will succeed; trimmed later on error */
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
|
2012-04-21 01:47:46 +07:00
|
|
|
|
|
|
|
/*
 * nfs_set_pgio_error - record an I/O error against a header
 * @hdr: the header covering the I/O
 * @error: error code to record
 * @pos: file offset at which the error occurred
 *
 * Shrinks the range of known-good bytes to end at @pos and flags the
 * header as errored. Errors at or beyond the current good range are
 * ignored, so only the earliest error position is kept.
 */
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		/* an error before EOF invalidates any earlier EOF indication */
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
static inline struct nfs_page *
|
|
|
|
nfs_page_alloc(void)
|
|
|
|
{
|
2012-08-01 06:45:16 +07:00
|
|
|
struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
|
2010-12-10 05:17:15 +07:00
|
|
|
if (p)
|
2005-04-17 05:20:36 +07:00
|
|
|
INIT_LIST_HEAD(&p->wb_list);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
nfs_page_free(struct nfs_page *p)
|
|
|
|
{
|
|
|
|
kmem_cache_free(nfs_page_cachep, p);
|
|
|
|
}
|
|
|
|
|
2013-04-09 08:38:12 +07:00
|
|
|
/*
 * nfs_iocounter_inc - take a reference on the lock context's I/O counter
 * @c: counter to increment
 */
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}
|
|
|
|
|
|
|
|
/*
 * nfs_iocounter_dec - drop a reference on the lock context's I/O counter
 * @c: counter to decrement
 *
 * When the count reaches zero, clears NFS_IO_INPROGRESS and wakes any
 * waiters sleeping in __nfs_iocounter_wait().
 */
static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		/* ensure the flag clear is visible before waking waiters */
		smp_mb__after_atomic();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}
|
|
|
|
|
|
|
|
/*
 * __nfs_iocounter_wait - slow path: sleep until io_count reaches zero
 * @c: counter to wait on
 *
 * Sets NFS_IO_INPROGRESS on each pass so that nfs_iocounter_dec() knows
 * a waiter exists and will issue a wakeup. Re-checks the count after
 * preparing to wait to avoid a lost-wakeup race.
 *
 * Returns 0 once the count hits zero, or a negative errno if interrupted
 * by a fatal signal (TASK_KILLABLE).
 */
static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		/* recheck after queueing to close the race with the waker */
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&q.key);
	} while (atomic_read(&c->io_count) != 0 && !ret);
	finish_wait(wq, &q.wait);
	return ret;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_iocounter_wait - wait for i/o to complete
|
|
|
|
* @c: nfs_io_counter to use
|
|
|
|
*
|
|
|
|
* returns -ERESTARTSYS if interrupted by a fatal signal.
|
|
|
|
* Otherwise returns 0 once the io_count hits 0.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfs_iocounter_wait(struct nfs_io_counter *c)
|
|
|
|
{
|
|
|
|
if (atomic_read(&c->io_count) == 0)
|
|
|
|
return 0;
|
|
|
|
return __nfs_iocounter_wait(c);
|
|
|
|
}
|
|
|
|
|
2014-05-15 22:56:45 +07:00
|
|
|
/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 * @nonblock - if true don't block waiting for lock
 *
 * this lock must be held if modifying the page group list
 *
 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 * result from wait_on_bit_lock
 *
 * NOTE: calling with nonblock=false should always have set the
 * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 */
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
	struct nfs_page *head = req->wb_head;

	/* the head must be its own head; anything else is a linkage bug */
	WARN_ON_ONCE(head != head->wb_head);

	/* fast path: lock was free */
	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	if (!nonblock)
		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);

	return -EAGAIN;
}
|
|
|
|
|
2014-08-08 22:00:57 +07:00
|
|
|
/*
 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 * @req - a request in the group
 *
 * This is a blocking call to wait for the group lock to be cleared.
 * Unlike nfs_page_group_lock() it does not acquire the lock itself.
 */
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
		TASK_UNINTERRUPTIBLE);
}
|
|
|
|
|
2014-05-15 22:56:45 +07:00
|
|
|
/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 *
 * Clears PG_HEADLOCK on the group head and wakes any waiters. The
 * barriers ensure all updates made under the lock are visible before
 * the lock bit is seen as clear.
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}
|
|
|
|
|
|
|
|
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 *
 * Sets @bit on @req, then checks whether every member of the circular
 * wb_this_page list now has the bit set. If so, clears the bit on all
 * members and returns true (i.e. @req was the last member to set it);
 * otherwise returns false and leaves the other members' bits intact.
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	/* caller must hold the group lock; bit must not already be set */
	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	/* walk the circular list; bail if any member lacks the bit */
	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}
|
|
|
|
|
|
|
|
/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 *
 * Locking wrapper around nfs_page_group_sync_on_bit_locked(); takes
 * the group head lock (blocking) for the duration of the check.
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req, false);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 *
 * For a head request, wb_head and wb_this_page point back at @req itself
 * (a singleton circular list). For a subrequest, @req is spliced into the
 * group's circular list after @prev and takes a kref on the head.
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		/* the group head lock must be held while linking */
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref if head request has extra ref from
		 * the write/commit path to handle handoff between write
		 * and commit lists */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function (synchronised via PG_TEARDOWN).
 * Only the last member to arrive tears down and frees the whole group.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	/* subrequests must release the ref on the head request */
	if (req->wb_head != req)
		nfs_release_request(req->wb_head);

	/* not the last member yet: leave the group intact */
	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 *
 * Returns the new request, or an ERR_PTR: -EBADF if the open context
 * has been marked bad, -ENOMEM on allocation failure, or the error
 * from nfs_get_lock_context().
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	req->wb_index = page_file_index(page);
	/* hold a page reference for the lifetime of the request */
	page_cache_get(page);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}
|
|
|
|
|
|
|
|
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 *
 * Clears PG_BUSY and wakes anyone sleeping in nfs_wait_on_request().
 * BUGs if the request was not actually locked.
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	/* barriers pair with the waiters' wait_on_bit_io() */
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}
|
|
|
|
|
|
|
|
/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 *
 * Convenience helper: unlock first so waiters can proceed, then drop
 * the caller's reference.
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}
|
|
|
|
|
2011-03-26 01:15:11 +07:00
|
|
|
/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed. Each pointer is NULLed after release
 * so a second call is harmless.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		/* drop the I/O count taken in nfs_create_request() */
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}
|
|
|
|
|
|
|
|
/**
 * nfs_free_request - Free all resources of a request and return it to the cache
 * @req: request to free
 *
 * Called once the page group teardown has unlinked @req (wb_this_page
 * must point back at @req). Releases the page/context resources and
 * frees the struct itself.
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}
|
|
|
|
|
2007-06-18 00:26:38 +07:00
|
|
|
/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Drops a reference; when the last reference goes, the page group
 * teardown path (nfs_page_group_destroy) runs.
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	/* sleep until nfs_unlock_request() clears PG_BUSY; accounted as I/O wait */
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
|
|
|
|
|
2014-05-15 22:56:43 +07:00
|
|
|
/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req can not be coalesced into @desc, otherwise it
 * returns the number of bytes of @req that can be coalesced.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	if (desc->pg_count > desc->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page) > PAGE_SIZE)
		return 0;

	/* coalesce whatever fits in the remaining block size */
	return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
|
2011-05-29 15:45:39 +07:00
|
|
|
|
2014-06-09 22:48:33 +07:00
|
|
|
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
|
2014-05-06 20:12:30 +07:00
|
|
|
{
|
2014-06-09 22:48:33 +07:00
|
|
|
struct nfs_pgio_header *hdr = ops->rw_alloc_header();
|
2014-05-06 20:12:30 +07:00
|
|
|
|
2014-06-09 22:48:33 +07:00
|
|
|
if (hdr) {
|
2014-05-06 20:12:30 +07:00
|
|
|
INIT_LIST_HEAD(&hdr->pages);
|
|
|
|
spin_lock_init(&hdr->lock);
|
|
|
|
hdr->rw_ops = ops;
|
|
|
|
}
|
2014-06-09 22:48:33 +07:00
|
|
|
return hdr;
|
2014-05-06 20:12:30 +07:00
|
|
|
}
|
2014-06-09 22:48:33 +07:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
|
2014-05-06 20:12:30 +07:00
|
|
|
|
|
|
|
/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 *
 * Delegates to the rw_ops-specific free routine paired with
 * nfs_pgio_header_alloc().
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
|
2014-05-06 20:12:30 +07:00
|
|
|
|
2014-05-06 20:12:29 +07:00
|
|
|
/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	put_nfs_open_context(hdr->args.context);
	/* only free the pagevec if it was heap-allocated (see nfs_pgarray_set) */
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
|
2014-05-06 20:12:29 +07:00
|
|
|
|
2014-05-06 20:12:34 +07:00
|
|
|
/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		/* conditionally stable: stay unstable only if there are
		 * outstanding commits; otherwise fall through to sync */
		if (nfs_reqs_to_commit(cinfo))
			break;
		/* fall through */
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = count;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}
|
|
|
|
|
2014-05-06 20:12:31 +07:00
|
|
|
/**
|
2014-06-09 22:48:35 +07:00
|
|
|
* nfs_pgio_prepare - Prepare pageio hdr to go over the wire
|
2014-05-06 20:12:31 +07:00
|
|
|
* @task: The current task
|
2014-06-09 22:48:35 +07:00
|
|
|
* @calldata: pageio header to prepare
|
2014-05-06 20:12:31 +07:00
|
|
|
*/
|
2014-05-06 20:12:33 +07:00
|
|
|
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
|
2014-05-06 20:12:31 +07:00
|
|
|
{
|
2014-06-09 22:48:35 +07:00
|
|
|
struct nfs_pgio_header *hdr = calldata;
|
2014-05-06 20:12:31 +07:00
|
|
|
int err;
|
2014-06-09 22:48:35 +07:00
|
|
|
err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
|
2014-05-06 20:12:31 +07:00
|
|
|
if (err)
|
|
|
|
rpc_exit(task, err);
|
|
|
|
}
|
|
|
|
|
2014-06-09 22:48:35 +07:00
|
|
|
/*
 * nfs_initiate_pgio - start an asynchronous read/write RPC
 * @clnt: RPC client to send the call on
 * @hdr: the prepared pageio header (args/res already set up)
 * @call_ops: RPC callback ops for this call
 * @how: FLUSH_* flags; FLUSH_SYNC makes this call wait for completion
 * @flags: extra RPC_TASK_* flags
 *
 * Returns 0 on success (or, for FLUSH_SYNC, the task's final status),
 * or a negative errno if the task could not be started.
 */
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = hdr->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	/* let the read/write-specific code fill in the RPC procedure */
	hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->task.tk_pid,
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
|
|
|
|
|
2014-05-06 20:12:35 +07:00
|
|
|
/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 *
 * Marks the header for redo, tears down its per-I/O state, runs the
 * completion callback, and lets the descriptor clean up its request
 * list. Always returns -ENOMEM (the only caller's failure mode).
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}
|
|
|
|
|
2014-05-06 20:12:31 +07:00
|
|
|
/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 *
 * RPC release callback: gives the read/write-specific code a chance to
 * clean up, then destroys the per-I/O data and completes the header.
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	if (hdr->rw_ops->rw_release)
		hdr->rw_ops->rw_release(hdr);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
|
2007-04-03 05:48:28 +07:00
|
|
|
* nfs_pageio_init - initialise a page io descriptor
|
|
|
|
* @desc: pointer to descriptor
|
2007-04-03 05:48:28 +07:00
|
|
|
* @inode: pointer to inode
|
|
|
|
* @doio: pointer to io function
|
|
|
|
* @bsize: io block size
|
|
|
|
* @io_flags: extra parameters for the io function
|
2007-04-03 05:48:28 +07:00
|
|
|
*/
|
2007-04-03 05:48:28 +07:00
|
|
|
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
|
|
|
|
struct inode *inode,
|
2011-06-11 00:30:23 +07:00
|
|
|
const struct nfs_pageio_ops *pg_ops,
|
2012-04-21 01:47:48 +07:00
|
|
|
const struct nfs_pgio_completion_ops *compl_ops,
|
2014-05-06 20:12:30 +07:00
|
|
|
const struct nfs_rw_ops *rw_ops,
|
2007-05-05 01:44:06 +07:00
|
|
|
size_t bsize,
|
2007-04-03 05:48:28 +07:00
|
|
|
int io_flags)
|
2007-04-03 05:48:28 +07:00
|
|
|
{
|
|
|
|
INIT_LIST_HEAD(&desc->pg_list);
|
2007-04-03 05:48:28 +07:00
|
|
|
desc->pg_bytes_written = 0;
|
2007-04-03 05:48:28 +07:00
|
|
|
desc->pg_count = 0;
|
|
|
|
desc->pg_bsize = bsize;
|
|
|
|
desc->pg_base = 0;
|
2011-03-22 04:02:00 +07:00
|
|
|
desc->pg_moreio = 0;
|
2011-07-13 00:42:02 +07:00
|
|
|
desc->pg_recoalesce = 0;
|
2007-04-03 05:48:28 +07:00
|
|
|
desc->pg_inode = inode;
|
2011-06-11 00:30:23 +07:00
|
|
|
desc->pg_ops = pg_ops;
|
2012-04-21 01:47:48 +07:00
|
|
|
desc->pg_completion_ops = compl_ops;
|
2014-05-06 20:12:30 +07:00
|
|
|
desc->pg_rw_ops = rw_ops;
|
2007-04-03 05:48:28 +07:00
|
|
|
desc->pg_ioflags = io_flags;
|
|
|
|
desc->pg_error = 0;
|
2011-03-01 08:34:14 +07:00
|
|
|
desc->pg_lseg = NULL;
|
2012-04-21 01:47:51 +07:00
|
|
|
desc->pg_dreq = NULL;
|
2012-08-02 19:36:09 +07:00
|
|
|
desc->pg_layout_private = NULL;
|
2007-04-03 05:48:28 +07:00
|
|
|
}
|
2012-07-31 03:05:25 +07:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_pageio_init);
|
2007-04-03 05:48:28 +07:00
|
|
|
|
2014-05-06 20:12:32 +07:00
|
|
|
/**
|
|
|
|
* nfs_pgio_result - Basic pageio error handling
|
|
|
|
* @task: The task that ran
|
2014-06-09 22:48:35 +07:00
|
|
|
* @calldata: Pageio header to check
|
2014-05-06 20:12:32 +07:00
|
|
|
*/
|
2014-05-06 20:12:33 +07:00
|
|
|
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
|
2014-05-06 20:12:32 +07:00
|
|
|
{
|
2014-06-09 22:48:35 +07:00
|
|
|
struct nfs_pgio_header *hdr = calldata;
|
|
|
|
struct inode *inode = hdr->inode;
|
2014-05-06 20:12:32 +07:00
|
|
|
|
|
|
|
dprintk("NFS: %s: %5u, (status %d)\n", __func__,
|
|
|
|
task->tk_pid, task->tk_status);
|
|
|
|
|
2014-06-09 22:48:35 +07:00
|
|
|
if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
|
2014-05-06 20:12:32 +07:00
|
|
|
return;
|
|
|
|
if (task->tk_status < 0)
|
2014-06-09 22:48:35 +07:00
|
|
|
nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
|
2014-05-06 20:12:32 +07:00
|
|
|
else
|
2014-06-09 22:48:35 +07:00
|
|
|
hdr->rw_ops->rw_result(task, hdr);
|
2014-05-06 20:12:32 +07:00
|
|
|
}
|
|
|
|
|
2014-05-06 20:12:36 +07:00
|
|
|
/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 *
 * Moves every request from desc->pg_list onto hdr->pages, collecting
 * each distinct page into hdr->page_array exactly once (consecutive
 * subrequests may share a page), then fills in the RPC arguments.
 * Returns 0 on success or the result of nfs_pgio_error() on failure.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_page *req;
	struct page **pages,
		    *last_page;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount, pageused;

	/* Size the page array from the byte range being sent. */
	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
		return nfs_pgio_error(desc, hdr);

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		/* Overrunning the array would mean pagecount was wrong. */
		if (WARN_ON_ONCE(pageused >= pagecount))
			return nfs_pgio_error(desc, hdr);

		/* Store each page only once even if several subrequests
		 * reference it; consecutive requests may share a page. */
		if (!last_page || last_page != req->wb_page) {
			*pages++ = last_page = req->wb_page;
			pageused++;
		}
	}
	/* The array must be filled exactly, or the accounting is broken. */
	if (WARN_ON_ONCE(pageused != pagecount))
		return nfs_pgio_error(desc, hdr);

	/* More I/O pending or commits outstanding: a conditional-stable
	 * write cannot be honoured, so drop the flag. */
	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);
|
2014-05-06 20:12:36 +07:00
|
|
|
|
2014-05-06 20:12:40 +07:00
|
|
|
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
|
2014-05-06 20:12:39 +07:00
|
|
|
{
|
|
|
|
struct nfs_pgio_header *hdr;
|
|
|
|
int ret;
|
|
|
|
|
2014-06-09 22:48:33 +07:00
|
|
|
hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
|
|
|
|
if (!hdr) {
|
2014-05-06 20:12:39 +07:00
|
|
|
desc->pg_completion_ops->error_cleanup(&desc->pg_list);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2014-06-09 22:48:33 +07:00
|
|
|
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
|
2014-05-06 20:12:39 +07:00
|
|
|
ret = nfs_generic_pgio(desc, hdr);
|
|
|
|
if (ret == 0)
|
2014-05-15 22:56:53 +07:00
|
|
|
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
|
2014-06-09 22:48:35 +07:00
|
|
|
hdr, desc->pg_rpc_callops,
|
2014-05-15 22:56:53 +07:00
|
|
|
desc->pg_ioflags, 0);
|
2014-05-06 20:12:39 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-09-06 22:09:38 +07:00
|
|
|
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
|
|
|
|
const struct nfs_open_context *ctx2)
|
|
|
|
{
|
|
|
|
return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
|
|
|
|
const struct nfs_lock_context *l2)
|
|
|
|
{
|
|
|
|
return l1->lockowner.l_owner == l2->lockowner.l_owner
|
|
|
|
&& l1->lockowner.l_pid == l2->lockowner.l_pid;
|
|
|
|
}
|
|
|
|
|
2007-04-03 05:48:28 +07:00
|
|
|
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page (may be NULL for the first request)
 * @req: pointer to nfs_page
 * @pgio: pointer to the io descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * As a side effect, pg_test may truncate req->wb_bytes so that only the
 * coalescable prefix of @req is accepted.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;

	if (prev) {
		/* Same credentials and NFSv4 open state required. */
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		/* Lockowners must match, but only if locks are held. */
		if (req->wb_context->dentry->d_inode->i_flock != NULL &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		/* File offsets must be contiguous. */
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			/* Subrequests of one page: byte-contiguous in it. */
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			/* Different pages: prev must end at a page boundary
			 * and req must start at one. */
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
				return false;
		}
	}
	/* Let the I/O-type specific test have the final say; it returns
	 * how many bytes of req it can accept (0 = none). */
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}
|
|
|
|
|
|
|
|
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * For the first request on an empty descriptor, runs the optional
 * pg_init hook and records the starting page offset before testing
 * coalescability.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_page *prev = NULL;
	if (desc->pg_count != 0) {
		/* Compare against the last request already queued. */
		prev = nfs_list_entry(desc->pg_list.prev);
	} else {
		/* First request: initialise descriptor state for it. */
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	/* Move the request onto the descriptor's list. */
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}
|
|
|
|
|
2007-04-03 05:48:28 +07:00
|
|
|
/*
|
|
|
|
* Helper for nfs_pageio_add_request and nfs_pageio_complete
|
|
|
|
*/
|
|
|
|
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
|
|
|
|
{
|
|
|
|
if (!list_empty(&desc->pg_list)) {
|
2011-06-11 00:30:23 +07:00
|
|
|
int error = desc->pg_ops->pg_doio(desc);
|
2007-04-03 05:48:28 +07:00
|
|
|
if (error < 0)
|
|
|
|
desc->pg_error = error;
|
|
|
|
else
|
|
|
|
desc->pg_bytes_written += desc->pg_count;
|
|
|
|
}
|
|
|
|
if (list_empty(&desc->pg_list)) {
|
|
|
|
desc->pg_count = 0;
|
|
|
|
desc->pg_base = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Each subrequest covers however many bytes the coalescing code accepts
 * (pg_test may truncate wb_bytes); the remainder of @req is carved into
 * a fresh subrequest and the loop continues until all bytes are queued.
 * If the descriptor refuses a subrequest outright, queued I/O is flushed
 * and the add is retried.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	/* Hold the group lock while splitting off subrequests. */
	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			/* Flush what is queued, then retry this subreq.
			 * Drop the group lock around the I/O. */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		/* Account for the part that was accepted. */
		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			/* Carve the remainder into a new subrequest in
			 * the same page group. */
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset  = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	/* Creating a subrequest failed; record the error and bail. */
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}
|
|
|
|
|
2011-07-13 00:42:02 +07:00
|
|
|
/*
 * Re-run coalescing for every request currently on the descriptor.
 *
 * The queued requests are moved to a private list, the descriptor's
 * accounting is reset, and each request is re-added; adding may itself
 * flush I/O and set pg_recoalesce again, in which case the whole pass
 * repeats.  Returns 1 on success, 0 if an error was recorded in
 * desc->pg_error.
 */
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		/* Take all queued requests and reset descriptor state. */
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;
		desc->pg_moreio = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			/* Add was refused without error: restart the pass. */
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}
|
|
|
|
|
|
|
|
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
|
|
|
|
struct nfs_page *req)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
do {
|
|
|
|
ret = __nfs_pageio_add_request(desc, req);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
if (desc->pg_error < 0)
|
|
|
|
break;
|
|
|
|
ret = nfs_do_recoalesce(desc);
|
|
|
|
} while (ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-06-09 22:48:38 +07:00
|
|
|
/*
|
|
|
|
* nfs_pageio_resend - Transfer requests to new descriptor and resend
|
|
|
|
* @hdr - the pgio header to move request from
|
|
|
|
* @desc - the pageio descriptor to add requests to
|
|
|
|
*
|
|
|
|
* Try to move each request (nfs_page) from @hdr to @desc then attempt
|
|
|
|
* to send them.
|
|
|
|
*
|
|
|
|
* Returns 0 on success and < 0 on error.
|
|
|
|
*/
|
|
|
|
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
|
|
|
|
struct nfs_pgio_header *hdr)
|
|
|
|
{
|
|
|
|
LIST_HEAD(failed);
|
|
|
|
|
|
|
|
desc->pg_dreq = hdr->dreq;
|
|
|
|
while (!list_empty(&hdr->pages)) {
|
|
|
|
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
|
|
|
|
|
|
|
|
nfs_list_remove_request(req);
|
|
|
|
if (!nfs_pageio_add_request(desc, req))
|
|
|
|
nfs_list_add_request(req, &failed);
|
|
|
|
}
|
|
|
|
nfs_pageio_complete(desc);
|
|
|
|
if (!list_empty(&failed)) {
|
|
|
|
list_move(&failed, &hdr->pages);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
|
2011-07-13 00:42:02 +07:00
|
|
|
|
2007-04-03 05:48:28 +07:00
|
|
|
/**
|
|
|
|
* nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
|
|
|
|
* @desc: pointer to io descriptor
|
|
|
|
*/
|
|
|
|
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
|
|
|
|
{
|
2011-07-13 00:42:02 +07:00
|
|
|
for (;;) {
|
|
|
|
nfs_pageio_doio(desc);
|
|
|
|
if (!desc->pg_recoalesce)
|
|
|
|
break;
|
|
|
|
if (!nfs_do_recoalesce(desc))
|
|
|
|
break;
|
|
|
|
}
|
2007-04-03 05:48:28 +07:00
|
|
|
}
|
|
|
|
|
2007-05-20 21:18:27 +07:00
|
|
|
/**
|
|
|
|
* nfs_pageio_cond_complete - Conditional I/O completion
|
|
|
|
* @desc: pointer to io descriptor
|
|
|
|
* @index: page index
|
|
|
|
*
|
|
|
|
* It is important to ensure that processes don't try to take locks
|
|
|
|
* on non-contiguous ranges of pages as that might deadlock. This
|
|
|
|
* function should be called before attempting to wait on a locked
|
|
|
|
* nfs_page. It will complete the I/O if the page index 'index'
|
|
|
|
* is not contiguous with the existing list of pages in 'desc'.
|
|
|
|
*/
|
|
|
|
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
|
|
|
|
{
|
|
|
|
if (!list_empty(&desc->pg_list)) {
|
|
|
|
struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
|
|
|
|
if (index != prev->wb_index + 1)
|
2011-07-13 00:42:02 +07:00
|
|
|
nfs_pageio_complete(desc);
|
2007-05-20 21:18:27 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
NFS: Split fs/nfs/inode.c
As fs/nfs/inode.c is rather large, heterogenous and unwieldy, the attached
patch splits it up into a number of files:
(*) fs/nfs/inode.c
Strictly inode specific functions.
(*) fs/nfs/super.c
Superblock management functions for NFS and NFS4, normal access, clones
and referrals. The NFS4 superblock functions _could_ move out into a
separate conditionally compiled file, but it's probably not worth it as
there're so many common bits.
(*) fs/nfs/namespace.c
Some namespace-specific functions have been moved here.
(*) fs/nfs/nfs4namespace.c
NFS4-specific namespace functions (this could be merged into the previous
file). This file is conditionally compiled.
(*) fs/nfs/internal.h
Inter-file declarations, plus a few simple utility functions moved from
fs/nfs/inode.c.
Additionally, all the in-.c-file externs have been moved here, and those
files they were moved from now includes this file.
For the most part, the functions have not been changed, only some multiplexor
functions have changed significantly.
I've also:
(*) Added some extra banner comments above some functions.
(*) Rearranged the function order within the files to be more logical and
better grouped (IMO), though someone may prefer a different order.
(*) Reduced the number of #ifdefs in .c files.
(*) Added missing __init and __exit directives.
Signed-Off-By: David Howells <dhowells@redhat.com>
2006-06-09 20:34:33 +07:00
|
|
|
/*
 * Create the slab cache backing struct nfs_page allocations.
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	return nfs_page_cachep != NULL ? 0 : -ENOMEM;
}
|
|
|
|
|
2006-06-28 02:59:15 +07:00
|
|
|
/* Tear down the nfs_page slab cache created by nfs_init_nfspagecache(). */
void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
|
|
|
|
|
2014-05-06 20:12:36 +07:00
|
|
|
/* RPC lifecycle callbacks shared by generic NFS read and write I/O. */
static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};
|
2014-05-06 20:12:40 +07:00
|
|
|
|
|
|
|
/* Generic pageio operations used for both NFS reads and writes. */
const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};
|