
📄 clnt.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__FUNCTION__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static __be32 *	call_header(struct rpc_task *task);
static __be32 *	call_verify(struct rpc_task *task);

static int	rpc_ping(struct rpc_clnt *clnt, int flags);

static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
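		/* -EEXIST: another client already claimed this name; loop
		 * around and retry with the next clntid value. */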
	}
}

static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	size_t len;

	/* sanity check the name before trying to print it */
	err = -EINVAL;
	len = strlen(servname);
	if (len > RPC_MAXNETNAMELEN)
		goto out_no_rpciod;
	len++;

	dprintk("RPC:       creating %s client for %s (xprt %p)\n",
			program->name, servname, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != 0)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	kref_init(&clnt->cl_kref);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
	rpc_register_client(clnt);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}

/*
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.timeout = args->timeout
	};
	char servername[20];

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
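	 * (servername[] above is 20 bytes, enough for a dotted-quad
	 * IPv4 address plus the terminating NUL.)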
	 */
	if (args->servername == NULL) {
		struct sockaddr_in *addr =
					(struct sockaddr_in *) args->address;
		snprintf(servername, sizeof(servername), NIPQUAD_FMT,
			NIPQUAD(addr->sin_addr.s_addr));
		args->servername = servername;
	}

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	clnt = rpc_new_client(xprt, args->servername, args->program,
				args->version, args->authflavor);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_INTR)
		clnt->cl_intr = 1;
	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	xprt_get(clnt->cl_xprt);
	kref_get(&clnt->cl_kref);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC:       %s: returned error %d\n", __FUNCTION__, err);
	return ERR_PTR(err);
}

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
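 * Tasks still on clnt->cl_tasks are killed and the caller waits in
 * one-second intervals until the list drains, after which the client
 * reference is dropped.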
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:       shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}

/*
 * Free an RPC client
 */
static void
rpc_free_client(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC:       destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}

/*
 * Free an RPC client
 */
static void
rpc_free_auth(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	if (clnt->cl_auth == NULL) {
		rpc_free_client(kref);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	kref_init(kref);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	kref_put(kref, rpc_free_client);
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:       rpc_release_client(%p)\n", clnt);

	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	kref_put(&clnt->cl_kref, rpc_free_auth);
}

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old - old rpc_client
 * @program - rpc program to set
 * @vers - rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
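 * The rebound clone is checked with rpc_ping() before it is returned;
 * if the server does not answer the new program, an ERR_PTR is returned
 * instead.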
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      u32 vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 *	Export the signal mask handling for synchronous code that
 *	sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

static
struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
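The listing breaks off here (page 1 of 3) in the middle of the rpc_do_run_task() declaration. As an illustration only, the sketch below shows how a kernel-side caller might use the rpc_create() / rpc_shutdown_client() interface defined above. The program description my_rpc_program, the version index, the address handling and the transport constant are assumptions made for this example; they are not part of clnt.c.

/*
 * Illustrative sketch, not part of clnt.c: create a client for an assumed
 * RPC program over TCP, skip the initial NULL-procedure ping, and shut the
 * client down again.
 */
#include <linux/err.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>

static int example_use_rpc_client(struct sockaddr_in *sin,
				  struct rpc_program *my_rpc_program,
				  u32 version)
{
	struct rpc_create_args args = {
		.protocol	= XPRT_TRANSPORT_TCP,	/* assumed transport ident */
		.address	= (struct sockaddr *)sin,
		.addrsize	= sizeof(*sin),
		.servername	= NULL,		/* let rpc_create() format the IP */
		.program	= my_rpc_program,
		.version	= version,	/* index into my_rpc_program->version[] */
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};
	struct rpc_clnt *clnt;

	clnt = rpc_create(&args);
	if (IS_ERR(clnt))
		return PTR_ERR(clnt);

	/* ... issue requests here with rpc_call_sync() or rpc_call_async() ... */

	/* Kills outstanding tasks, waits for them, then drops the reference. */
	rpc_shutdown_client(clnt);
	return 0;
}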
