
📄 dev.c

📁 fuse-2.5.2, a userspace filesystem for Linux
💻 C
📖 Page 1 of 2
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

#ifdef MODULE_ALIAS_MISCDEV
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
#endif

static kmem_cache_t *fuse_req_cachep;

static inline struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->mounted)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}

static inline void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

#ifdef KERNEL_2_6
static inline void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static inline void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}
#else
#ifdef HAVE_RECALC_SIGPENDING_TSK
static inline void block_sigs(sigset_t *oldset)
{
        spin_lock_irq(&current->sighand->siglock);
        *oldset = current->blocked;
        siginitsetinv(&current->blocked, sigmask(SIGKILL) & ~oldset->sig[0]);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}

static inline void restore_sigs(sigset_t *oldset)
{
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = *oldset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void block_sigs(sigset_t *oldset)
{
        spin_lock_irq(&current->sigmask_lock);
        *oldset = current->blocked;
        siginitsetinv(&current->blocked, sigmask(SIGKILL) & ~oldset->sig[0]);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);
}

static inline void restore_sigs(sigset_t *oldset)
{
        spin_lock_irq(&current->sigmask_lock);
        current->blocked = *oldset;
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);
}
#endif
#endif

void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        return intr ? NULL : do_get_request(fc);
}
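/*
 * Editor's note: a minimal sketch (not part of the original file, hence
 * kept under "#if 0") of how callers elsewhere in FUSE typically drive
 * the request lifecycle defined above.  The opcode is a placeholder;
 * a real caller would also fill in nodeid and the argument descriptors.
 */
#if 0
static int example_request_roundtrip(struct fuse_conn *fc)
{
        struct fuse_req *req = fuse_get_request(fc);
        int err;

        if (!req)
                return -EINTR;                  /* wait killed by SIGKILL */
        req->in.h.opcode = FUSE_GETATTR;        /* placeholder opcode */
        request_send(fc, req);                  /* blocks until the reply */
        err = req->out.h.error;                 /* filled in by userspace */
        fuse_put_request(fc, req);              /* drop the caller's ref */
        return err;
}
#endif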
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req->preallocated)
                list_add(&req->list, &fc->unused_list);
        else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                spin_lock(&fuse_lock);
                fuse_putback_request(fc, req);
                spin_unlock(&fuse_lock);
        }
}

static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}

static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        struct fuse_init_out *arg = &req->misc.init_out;

        if (arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
        }

        /* After the INIT reply is received other requests can go
           out.  So do (FUSE_MAX_OUTSTANDING - 1) number of up()s on
           outstanding_sem.  The last up() is done in
           fuse_putback_request() */
        for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
                up(&fc->outstanding_sem);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file was
 * closed.  It decreases the reference count for the request.  In case
 * of a background request the references to the stored objects are
 * released.  The requester thread is woken up (if still waiting), and
 * finally the request is either freed or put on the unused_list.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        req->finished = 1;
        if (!req->background) {
                wake_up(&req->waitq);
                fuse_put_request_locked(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                spin_unlock(&fuse_lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
                if (req->in.h.opcode == FUSE_INIT)
                        process_init_reply(fc, req);
                else if (req->in.h.opcode == FUSE_RELEASE &&
                         req->inode == NULL) {
                        /* Special case for failed iget in CREATE */
                        u64 nodeid = req->in.h.nodeid;
                        fuse_reset_request(req);
                        fuse_send_forget(fc, req, nodeid, 1);
                        return;
                }
                fuse_put_request(fc, req);
        }
}
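/*
 * Editor's note (not part of the original file): a worked example of the
 * outstanding_sem accounting above, assuming FUSE_MAX_OUTSTANDING is 10,
 * its value in fuse_i.h for this release:
 *
 *   mount:            sem starts at 0; INIT bypasses it via do_get_request()
 *   INIT reply:       process_init_reply() does 9 up()s        -> sem = 9
 *   INIT req put back: fuse_putback_request() does the last up() -> sem = 10
 *
 * From then on at most 10 requests can be outstanding at once: each
 * fuse_get_request() downs the semaphore, and each putback either ups it
 * or pays off outstanding_debt first.
 */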
/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) or file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->finished);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->finished)
                return;

        req->out.h.error = -EINTR;
        req->interrupted = 1;
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (!req->sent && !list_empty(&req->list)) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (!req->finished && req->sent)
                background_request(fc, req);
}
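/*
 * Editor's note (not part of the original file): the interrupted-request
 * handling in request_wait_answer() above, summarized per state:
 *
 *   already finished                 -> nothing to do, the reply arrived
 *   locked (data copy in progress)   -> sleep uninterruptibly until unlocked
 *   queued but not yet sent          -> unlink from the pending list, drop ref
 *   sent, no reply yet               -> turn it into a background request
 */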
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If the request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so
                   the user can't open an infinite number of files
                   while not processing the RELEASE requests.  However
                   for efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}

void fuse_send_init(struct fuse_conn *fc)
{
        /* This is called from fuse_read_super() so there's guaranteed
           to be a request available */
        struct fuse_req *req = do_get_request(fc);
        struct fuse_init_in *arg = &req->misc.init_in;

        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        request_send_background(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static inline int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}

/*
 * Unlock request.  If it was interrupted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static inline void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}

struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
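/*
 * Editor's note: a minimal sketch (not part of the original file) of how
 * the three send paths above differ for callers.  The RELEASE example is
 * hypothetical and omits the argument setup a real caller would do:
 *
 *   request_send()            - synchronous, blocks for the reply
 *   request_send_noreply()    - no reply expected (FORGET)
 *   request_send_background() - reply expected, caller does not block
 *                               (RELEASE, INIT)
 */
#if 0
static void example_async_release(struct fuse_conn *fc, struct fuse_req *req,
                                  struct inode *inode)
{
        req->in.h.opcode = FUSE_RELEASE;
        req->inode = inode;     /* extra ref taken by background_request() */
        request_send_background(fc, req);
        /* no fuse_put_request() here: request_end() drops the last
           reference once the reply arrives */
}
#endif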
