⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 nfs4state.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 第 1 页 / 共 2 页
字号:
/* *  fs/nfs/nfs4state.c * *  Client-side XDR for NFSv4. * *  Copyright (c) 2002 The Regents of the University of Michigan. *  All rights reserved. * *  Kendrick Smith <kmsmith@umich.edu> * *  Redistribution and use in source and binary forms, with or without *  modification, are permitted provided that the following conditions *  are met: * *  1. Redistributions of source code must retain the above copyright *     notice, this list of conditions and the following disclaimer. *  2. Redistributions in binary form must reproduce the above copyright *     notice, this list of conditions and the following disclaimer in the *     documentation and/or other materials provided with the distribution. *  3. Neither the name of the University nor the names of its *     contributors may be used to endorse or promote products derived *     from this software without specific prior written permission. * *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Implementation of the NFSv4 state model.  For the time being, * this is minimal, but will be made much more complex in a * subsequent patch. 
*/

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "callback.h"
#include "delegation.h"

/* Maximum number of retired state owners cached on a client's free list */
#define OPENOWNER_POOL_SIZE	8

/* Protects nfs4_clientid_list and nfs4_client insertion/removal */
static spinlock_t		state_spinlock = SPIN_LOCK_UNLOCKED;

/* All-zeroes stateid (static storage, so implicitly zero-initialized) */
nfs4_stateid zero_stateid;

#if 0
nfs4_stateid one_stateid =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#endif

/* Global list of known clients, guarded by state_spinlock */
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
extern void nfs4_renew_state(void *);

/*
 * Initialize the NFSv4 portion of an nfs_server: no client state
 * attached yet, and not linked to any sibling list.
 */
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

/*
 * Release the NFSv4 state held by an nfs_server: frees the mount
 * path and drops the server's reference to its nfs4_client.
 */
void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
*/

/*
 * Allocate and initialize a new nfs4_client for the given server
 * address.  Brings the callback server up first; tears it back down
 * if the allocation fails.  Returns NULL on failure.
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);		/* caller holds the first reference */
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}

/*
 * Free a client structure: drains the unused state-owner pool,
 * releases the credential, idmap and RPC client (if set), and drops
 * the callback server reference taken in nfs4_alloc_client().
 */
static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	/* All active state owners must be gone by now */
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (clp->cl_rpcclient)
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

/*
 * Look up a client by server IP address, taking a reference on it.
 * Caller must hold state_spinlock.
 */
static struct nfs4_client *
__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

/* Locked wrapper around __nfs4_find_client() */
struct nfs4_client *
nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

/*
 * Find or create the nfs4_client for the given address.  The
 * allocation happens with state_spinlock dropped, so the list is
 * re-checked afterwards in case another task inserted one first.
 */
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);	/* lost the race; discard our copy */
	return clp;
}

/*
 * Drop a reference to a client.  The final reference unhashes the
 * client from nfs4_clientid_list, wakes any waiters, stops lease
 * renewal, and frees the structure.
 */
void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

/*
 * Establish the client ID with the server (SETCLIENTID followed by
 * SETCLIENTID_CONFIRM) and, on success, schedule lease renewal.
 * Returns 0 on success.
 */
int nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);

	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

/* Hand out the next lock-owner id for this client */
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id ++;
}

/*
 * Try to reuse a state owner from the client's free pool, rebinding
 * it to the given credential.  Caller must hold clp->cl_lock.
 */
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

/*
 * Find an active state owner matching the credential, taking a
 * reference on it.  Caller must hold clp->cl_lock.
 */
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
* */static struct nfs4_state_owner *nfs4_alloc_state_owner(void){	struct nfs4_state_owner *sp;	sp = kmalloc(sizeof(*sp),GFP_KERNEL);	if (!sp)		return NULL;	init_MUTEX(&sp->so_sema);	sp->so_seqid = 0;                 /* arbitrary */	INIT_LIST_HEAD(&sp->so_states);	INIT_LIST_HEAD(&sp->so_delegations);	atomic_set(&sp->so_count, 1);	return sp;}static voidnfs4_unhash_state_owner(struct nfs4_state_owner *sp){	struct nfs4_client *clp = sp->so_client;	spin_lock(&clp->cl_lock);	list_del_init(&sp->so_list);	spin_unlock(&clp->cl_lock);}/* * Note: must be called with clp->cl_sem held in order to prevent races *       with reboot recovery! */struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred){	struct nfs4_client *clp = server->nfs4_state;	struct nfs4_state_owner *sp, *new;	get_rpccred(cred);	new = nfs4_alloc_state_owner();	spin_lock(&clp->cl_lock);	sp = nfs4_find_state_owner(clp, cred);	if (sp == NULL)		sp = nfs4_client_grab_unused(clp, cred);	if (sp == NULL && new != NULL) {		list_add(&new->so_list, &clp->cl_state_owners);		new->so_client = clp;		new->so_id = nfs4_alloc_lockowner_id(clp);		new->so_cred = cred;		sp = new;		new = NULL;	}	spin_unlock(&clp->cl_lock);	if (new)		kfree(new);	if (sp != NULL)		return sp;	put_rpccred(cred);	return NULL;}/* * Must be called with clp->cl_sem held in order to avoid races * with state recovery... 
*/
void
nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	/* Cache the owner on the client's unused pool if there is room
	 * and it is still hashed; otherwise free it outright. */
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

/* Allocate and zero-initialize an open-state structure (NULL on failure) */
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	init_MUTEX(&state->lock_sema);
	rwlock_init(&state->state_lock);
	return state;
}

/*
 * Find an open state matching both the credential and the requested
 * open mode; bumps the reference count and the reader/writer counts
 * on a hit.  Caller must hold inode->i_lock.
 */
static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}

/*
 * Find the open state belonging to a given state owner, taking a
 * reference on it.  Caller must hold inode->i_lock.
 */
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

/* Locked wrapper around __nfs4_find_state() */
struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

/* Release the memory backing an open state */
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

/*
 * Find or create the open state for this (inode, owner) pair.
 *
 * NOTE(review): the source listing is truncated below ("page 1 of 2"
 * in the original capture) -- the else branch and the out: label are
 * not visible here.
 */
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	/* Allocate before retaking the lock, then re-check for a racer */
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Caller *must* be holding owner->so_sem */
		list_add(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = inode;
		spin_unlock(&inode->i_lock);
	} else {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -