linux_dsm_epyc7002/include/linux/sunrpc/svcsock.h


/*
* linux/include/linux/sunrpc/svcsock.h
*
* RPC server socket I/O.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#ifndef SUNRPC_SVCSOCK_H
#define SUNRPC_SVCSOCK_H
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
/*
* RPC server socket.
*/
struct svc_sock {
	struct svc_xprt		sk_xprt;
	struct socket *		sk_sock;	/* berkeley socket layer */
	struct sock *		sk_sk;		/* INET layer */

	atomic_t		sk_reserved;	/* space on outq that is reserved */

	spinlock_t		sk_lock;	/* protects sk_deferred and
						 * sk_info_authunix */
	struct list_head	sk_deferred;	/* deferred requests that need
						 * to be revisited */
	struct mutex		sk_mutex;	/* to serialize sending data */

	/* We keep the old state_change and data_ready CB's here */
	void			(*sk_ostate)(struct sock *);
	void			(*sk_odata)(struct sock *, int bytes);
	void			(*sk_owspace)(struct sock *);

	/* private TCP part */
	int			sk_reclen;	/* length of record */
	int			sk_tcplen;	/* current read length */
	time_t			sk_lastrecv;	/* time of last received request */
	/*
	 * Commit note (2006-10-04), "knfsd: cache ipmap per TCP socket":
	 *
	 * Speed up high call-rate workloads by caching the struct ip_map
	 * for the peer on the connected struct svc_sock instead of looking
	 * it up in the ip_map cache hashtable on every call.  This helps
	 * workloads using AUTH_SYS authentication over TCP.
	 *
	 * Testing was on a 4 CPU, 4 NIC Altix using 4 IRIX clients, each
	 * with 16 synthetic client threads simulating an rsync (i.e.
	 * recursive directory listing) workload reading from an i386 RH9
	 * install image (161480 regular files in 10841 directories) on the
	 * server.  That tree is small enough to fit in the server's RAM,
	 * so no disk traffic was involved.  This setup gives a sustained
	 * call rate in excess of 60000 calls/sec before being CPU-bound on
	 * the server.  Profiling showed strcmp(), called from
	 * ip_map_match(), was taking 4.8% of each CPU, and ip_map_lookup()
	 * was taking 2.9%.  This patch drops both contributions into the
	 * profile noise.
	 *
	 * Note that the above result overstates the value of this patch
	 * for most workloads.  The synthetic clients are all using
	 * separate IP addresses, so there are 64 entries in the ip_map
	 * cache hash.  Because the kernel measured contained the bug fixed
	 * in commit 1f1e030bf75774b6a283518e1534d598e14147d4 and was
	 * running on a 64-bit little-endian machine, probably all of those
	 * 64 entries were on a single chain, thus increasing the cost of
	 * ip_map_lookup().  With a modern kernel you would need more
	 * clients to see the same amount of performance improvement.  This
	 * patch has helped to scale knfsd to handle a deployment with 2000
	 * NFS clients.
	 *
	 * Signed-off-by: Greg Banks <gnb@melbourne.sgi.com>
	 * Signed-off-by: Neil Brown <neilb@suse.de>
	 * Signed-off-by: Andrew Morton <akpm@osdl.org>
	 * Signed-off-by: Linus Torvalds <torvalds@osdl.org>
	 *
	 * (An illustrative sketch of this caching pattern follows the
	 * struct definition below.)
	 */
	/* cache of various info for TCP sockets */
	void			*sk_info_authunix;

	struct sockaddr_storage	sk_local;	/* local address */
	struct sockaddr_storage	sk_remote;	/* remote peer's address */
	int			sk_remotelen;	/* length of address */
};
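/*
 * Illustrative sketch, not part of the original header: the commit note
 * above caches the peer's ip_map on the connected socket so that AUTH_SYS
 * requests avoid a hashtable lookup on every call.  The helper below only
 * shows the check-then-fill pattern around sk_info_authunix, which sk_lock
 * protects; ip_map_lookup_slow() is a hypothetical stand-in for the real
 * ip_map cache lookup, and the reference counting the real code applies to
 * the cached entry is omitted here.
 *
 *	static void *svc_sock_get_authunix(struct svc_sock *svsk)
 *	{
 *		void *ipm;
 *
 *		spin_lock(&svsk->sk_lock);
 *		ipm = svsk->sk_info_authunix;	// fast path: reuse cached entry
 *		spin_unlock(&svsk->sk_lock);
 *		if (ipm != NULL)
 *			return ipm;
 *
 *		ipm = ip_map_lookup_slow(svsk);	// hypothetical hashtable lookup
 *		spin_lock(&svsk->sk_lock);
 *		if (svsk->sk_info_authunix == NULL)
 *			svsk->sk_info_authunix = ipm;	// cache for later calls
 *		spin_unlock(&svsk->sk_lock);
 *		return ipm;
 *	}
 */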
/*
* Function prototypes.
*/
void		svc_close_all(struct list_head *);
int		svc_recv(struct svc_rqst *, long);
int		svc_send(struct svc_rqst *);
void		svc_drop(struct svc_rqst *);
void		svc_sock_update_bufs(struct svc_serv *serv);
int		svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
int		svc_addsock(struct svc_serv *serv,
			    int fd,
			    char *name_return,
			    int *proto);
void		svc_init_xprt_sock(void);
void		svc_cleanup_xprt_sock(void);
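/*
 * Illustrative sketch, not part of the original header: roughly how a
 * service thread of this era drives the entry points above, loosely
 * modelled on knfsd's main loop.  svc_process() is declared in
 * <linux/sunrpc/svc.h> and replies via svc_send(); thread setup, signal
 * handling and shutdown are omitted, and the timeout is only an example.
 *
 *	for (;;) {
 *		int err = svc_recv(rqstp, 60 * 60 * HZ); // wait up to an hour
 *
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;	// timed out or signalled: no request;
 *					// a real thread checks for shutdown here
 *		if (err < 0)
 *			break;		// unrecoverable error
 *		svc_process(rqstp);	// dispatch the call and send the reply
 *	}
 */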
/*
* svc_makesock socket characteristics
*/
#define SVC_SOCK_DEFAULTS (0U)
#define SVC_SOCK_ANONYMOUS (1U << 0) /* don't register with pmap */
#define SVC_SOCK_TEMPORARY (1U << 1) /* flag socket as temporary */
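/*
 * Illustrative sketch, not part of the original header: these flags are
 * OR'ed together when a service socket is created.  create_listener() is
 * a hypothetical placeholder for the socket/transport creation helper,
 * used only to show how the flags combine.
 *
 *	// a temporary socket that must not be registered with the portmapper
 *	unsigned int flags = SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY;
 *	int err = create_listener(serv, port, flags);
 */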
#endif /* SUNRPC_SVCSOCK_H */