⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 clnt.c

📁 嵌入式系统设计与实例开发实验教材二源码 多线程应用程序设计 串行端口程序设计 AD接口实验 CAN总线通信实验 GPS通信实验 Linux内核移植与编译实验 IC卡读写实验 SD驱动使
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 *  linux/net/sunrpc/rpcclnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP reconnect handling (when finished).
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may be have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/utsname.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>

/* Extra room added to every RPC buffer beyond the per-proc estimate. */
#define RPC_SLACK_SPACE		512	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/* Waiters in rpc_shutdown_client() sleep here until cl_users drains. */
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

/*
 * Forward declarations for the per-state handlers of the RPC call
 * state machine; each sets task->tk_action to the next state.
 */
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_reconnect(struct rpc_task *task);
static void	child_reconnect(struct rpc_task *);
static void	child_reconnect_status(struct rpc_task *);
static u32 *	call_header(struct rpc_task *task);
static u32 *
call_verify(struct rpc_task *task);/* * Create an RPC client * FIXME: This should also take a flags argument (as in task->tk_flags). * It's called (among others) from pmap_create_client, which may in * turn be called by an async task. In this case, rpciod should not be * made to sleep too long. */struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,		  struct rpc_program *program, u32 vers, int flavor){	struct rpc_version	*version;	struct rpc_clnt		*clnt = NULL;	dprintk("RPC: creating %s client for %s (xprt %p)\n",		program->name, servname, xprt);#ifdef RPC_DEBUG	rpc_register_sysctl();#endif	if (!xprt)		goto out;	if (vers >= program->nrvers || !(version = program->version[vers]))		goto out;	clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));	if (!clnt)		goto out_no_clnt;	memset(clnt, 0, sizeof(*clnt));	atomic_set(&clnt->cl_users, 0);	clnt->cl_xprt     = xprt;	clnt->cl_procinfo = version->procs;	clnt->cl_maxproc  = version->nrprocs;	clnt->cl_server   = servname;	clnt->cl_protname = program->name;	clnt->cl_port     = xprt->addr.sin_port;	clnt->cl_prog     = program->number;	clnt->cl_vers     = version->number;	clnt->cl_prot     = xprt->prot;	clnt->cl_stats    = program->stats;	clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");	if (!clnt->cl_port)		clnt->cl_autobind = 1;	if (!rpcauth_create(flavor, clnt))		goto out_no_auth;	/* save the nodename */	clnt->cl_nodelen = strlen(system_utsname.nodename);	if (clnt->cl_nodelen > UNX_MAXNODENAME)		clnt->cl_nodelen = UNX_MAXNODENAME;	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);out:	return clnt;out_no_clnt:	printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");	goto out;out_no_auth:	printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",		flavor);	rpc_free(clnt);	clnt = NULL;	goto out;}/* * Properly shut down an RPC client, terminating all outstanding * requests. 
 * Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);
	/* Keep killing tasks and sleeping until all users are gone. */
	while (atomic_read(&clnt->cl_users)) {
#ifdef RPC_DEBUG
		dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
			clnt->cl_protname, atomic_read(&clnt->cl_users));
#endif
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		/* Woken early via destroy_wait, or re-poll after ~1s. */
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}
	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client: tear down its auth handle and transport,
 * then free the client structure itself.  Always returns 0.
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	rpc_free(clnt);
	return 0;
}

/*
 * Release an RPC client: drop one cl_users reference.  When the last
 * reference goes away, wake any shutdown waiter and, for one-shot or
 * dead clients, destroy the client outright.
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC:      rpc_release_client(%p, %d)\n",
				clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	/* Last user: let rpc_shutdown_client() re-check cl_users. */
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls (intentionally a no-op).
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 *	Export the signal mask handling for asynchronous code that
 *	sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	/* SIGKILL is always allowed to interrupt an RPC wait. */
	unsigned long	sigallow = sigmask(SIGKILL);
	unsigned long	irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sig->action;
		/* Only honour INT/QUIT if the process has no handler installed. */
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	/* Save the old mask, then block everything except sigallow. */
	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked,
	sigallow & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

/*
 * Restore the signal mask saved by rpc_clnt_sigmask().
 */
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long	irqflags;

	spin_lock_irqsave(&current->sigmask_lock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

/*
 * New rpc_call implementation: perform a synchronous RPC call,
 * running the task state machine to completion on this thread's
 * stack.  Returns the task status (0 on success, negative errno).
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	/* Task lives on the caller's stack; rpc_execute() runs it inline. */
	struct rpc_task	my_task, *task = &my_task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead) 
		return -EIO;

	/* ASYNC makes no sense here; strip it and warn. */
	if (flags & RPC_TASK_ASYNC) {
		printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
		flags &= ~RPC_TASK_ASYNC;
	}

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	rpc_init_task(task, clnt, NULL, flags);
	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

/*
 * New rpc_call implementation: start an asynchronous RPC call.
 * The heap-allocated task runs under rpciod; `callback` (or a no-op
 * default) fires on completion with `data` in tk_calldata.
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead) 
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

/*
 * Attach the RPC message and credentials to a task and arm the
 * state machine (first state: call_reserve).
 */
void
rpc_call_setup(struct rpc_task *task, struct
rpc_message *msg, int flags){	task->tk_msg   = *msg;	task->tk_flags |= flags;	/* Bind the user cred */	if (task->tk_msg.rpc_cred != NULL) {		rpcauth_holdcred(task);	} else		rpcauth_bindcred(task);	if (task->tk_status == 0)		task->tk_action = call_reserve;	else		task->tk_action = NULL;	/* Increment call count */	if (task->tk_msg.rpc_proc < task->tk_client->cl_maxproc)		rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;}/* * Restart an (async) RPC call. Usually called from within the * exit handler. */voidrpc_restart_call(struct rpc_task *task){	if (RPC_ASSASSINATED(task))		return;	task->tk_action = call_reserve;	rpcproc_count(task->tk_client, task->tk_msg.rpc_proc)++;}/* * 1.	Reserve an RPC call slot */static voidcall_reserve(struct rpc_task *task){	struct rpc_clnt	*clnt = task->tk_client;	if (task->tk_msg.rpc_proc > clnt->cl_maxproc) {		printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",			clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc);		rpc_exit(task, -EIO);		return;	}	dprintk("RPC: %4d call_reserve\n", task->tk_pid);	if (!rpcauth_uptodatecred(task)) {		task->tk_action = call_refresh;		return;	}	task->tk_status  = 0;	task->tk_action  = call_reserveresult;	task->tk_timeout = clnt->cl_timeout.to_resrvval;	clnt->cl_stats->rpccnt++;	xprt_reserve(task);}/* * 1b.	Grok the result of xprt_reserve() */static voidcall_reserveresult(struct rpc_task *task){	int status = task->tk_status;	dprintk("RPC: %4d call_reserveresult (status %d)\n",				task->tk_pid, task->tk_status);	/*	 * After a call to xprt_reserve(), we must have either	 * a request slot or else an error status.	 
 */
	/* Sanity check: slot and status must agree (slot iff status >= 0). */
	if ((task->tk_status >= 0 && !task->tk_rqstp) ||
	    (task->tk_status < 0 && task->tk_rqstp))
		printk(KERN_ERR "call_reserveresult: status=%d, request=%p??\n",
		 task->tk_status, task->tk_rqstp);

	if (task->tk_status >= 0) {
		task->tk_action = call_allocate;
		return;
	}

	task->tk_status = 0;
	switch (status) {
	case -EAGAIN:
	case -ENOBUFS:
		/* Transient: retry the reservation after a delay. */
		task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
		task->tk_action = call_reserve;
		break;
	case -ETIMEDOUT:
		dprintk("RPC: task timed out\n");
		task->tk_action = call_timeout;
		break;
	default:
		if (!task->tk_rqstp) {
			printk(KERN_INFO "RPC: task has no request, exit EIO\n");
			rpc_exit(task, -EIO);
		} else
			rpc_exit(task, status);
	}
}

/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n", 
				task->tk_pid, task->tk_status);
	task->tk_action = call_encode;
	/* A buffer may survive from a previous pass; reuse it. */
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

	/* Double the size: the buffer holds both send and receive halves. */
	if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
	/* NOTE(review): source chunk is truncated here; the remainder of
	 * call_allocate continues in the full file. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -