
📄 dev.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			set_bdi_congested(&fc->bdi, READ);
			set_bdi_congested(&fc->bdi, WRITE);
		}

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
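
The listing is cut off here and continues on page 2. For context: the request queue that queue_request() and request_wait_answer() manage above is what a userspace FUSE daemon drains through /dev/fuse. The daemon read()s a message beginning with struct fuse_in_header, handles it, and write()s back a reply beginning with struct fuse_out_header, matched to the waiting fuse_req by the unique value assigned in fuse_get_unique(). The sketch below is a minimal, hypothetical illustration of that read/reply cycle from the userspace side, not how libfuse structures its real event loop; serve_one_request() and its fd argument are made up for the example, and the fd is assumed to be an already-mounted /dev/fuse descriptor obtained via the usual fusermount/mount handshake.

/*
 * Illustrative sketch only (not part of dev.c): the userspace half of the
 * exchange that queue_request()/request_end() above implement.  Error
 * handling is abbreviated.
 */
#include <linux/fuse.h>		/* fuse_in_header, fuse_out_header */
#include <errno.h>
#include <unistd.h>

static void serve_one_request(int fd)	/* fd: mounted /dev/fuse descriptor */
{
	char buf[FUSE_MIN_READ_BUFFER];		/* the kernel rejects shorter reads */
	ssize_t n = read(fd, buf, sizeof(buf));	/* one whole request per read() */

	if (n < (ssize_t) sizeof(struct fuse_in_header))
		return;				/* short read or error: nothing to answer */

	struct fuse_in_header *in = (struct fuse_in_header *) buf;

	/* Reply with the same 'unique' so the kernel can match it to the
	   queued fuse_req and finish it via request_end(). */
	struct fuse_out_header out = {
		.len    = sizeof(out),
		.error  = -ENOSYS,		/* placeholder: "operation not implemented" */
		.unique = in->unique,
	};
	write(fd, &out, out.len);
}

The kernel-side device handlers that serve these read() and write() calls, built on the fuse_copy_* helpers above, follow in the remainder of dev.c on page 2.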
