
📄 dev.c
📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
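The FUSE_INTERRUPT request assembled above carries a fresh unique ID in its header, while arg.unique names the original request being interrupted. Below is a minimal userspace-side sketch of answering it, assuming a raw /dev/fuse descriptor and the structures from <linux/fuse.h>; try_cancel_request() is a hypothetical filesystem-specific lookup, stubbed here.

#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical lookup of an in-flight request by unique ID; a real
 * daemon would search its own bookkeeping.  Stubbed for the sketch. */
static int try_cancel_request(uint64_t unique)
{
	(void) unique;
	return 0;
}

static void handle_interrupt(int fd, const struct fuse_in_header *ih,
			     const struct fuse_interrupt_in *arg)
{
	struct fuse_out_header oh;

	/* arg->unique names the original request being interrupted.
	 * If it is found and cancelled, the daemon completes the
	 * original request instead (typically with -EINTR). */
	if (!try_cancel_request(arg->unique)) {
		/* Original request not seen yet: reply -EAGAIN so the
		 * kernel requeues the interrupt.  The reply must be
		 * header-only and echo the interrupt's own unique. */
		memset(&oh, 0, sizeof(oh));
		oh.len = sizeof(oh);
		oh.error = -EAGAIN;
		oh.unique = ih->unique;
		(void) write(fd, &oh, sizeof(oh));
	}
}

Replying -EAGAIN makes the kernel requeue the interrupt, and replying -ENOSYS disables interrupts for the whole connection; both branches are visible in fuse_dev_write() below.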
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
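fuse_dev_read() hands userspace exactly one request per read() call: a fuse_in_header followed by the opcode-specific arguments, in->h.len bytes in total, and it replies -EIO (or -E2BIG for SETXATTR) to any request that does not fit the supplied buffer. Below is a minimal daemon read loop under those rules, assuming an already-open /dev/fuse descriptor; the buffer size and the dispatch() helper are assumptions for the sketch, not part of this file.

#include <linux/fuse.h>
#include <stdio.h>
#include <unistd.h>

#define REQ_BUF_SIZE (64 * 1024)	/* assumption: big enough for the
					 * largest request we negotiated */

/* Filesystem-specific request handling; stubbed for the sketch. */
static void dispatch(struct fuse_in_header *ih, void *arg, size_t argsize)
{
	(void) ih; (void) arg; (void) argsize;
}

static void event_loop(int fd)
{
	char buf[REQ_BUF_SIZE];

	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));
		struct fuse_in_header *ih = (struct fuse_in_header *) buf;

		if (n < 0)
			break;	/* e.g. -ENODEV once the connection dies */

		/* ih->opcode selects the operation; ih->unique must be
		 * echoed back in the reply header. */
		printf("opcode %u unique %llu len %u\n", ih->opcode,
		       (unsigned long long) ih->unique, ih->len);
		dispatch(ih, buf + sizeof(*ih), (size_t) n - sizeof(*ih));
	}
}

A blocking read() returns -ENODEV once the connection is aborted or unmounted, which is the natural exit condition for the loop; with O_NONBLOCK an empty pending list instead shows up as -EAGAIN, as the first check in fuse_dev_read() shows.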
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
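fuse_dev_write() accepts one complete reply per write: a fuse_out_header whose len field must equal the total number of bytes written, whose error must be 0 or a negative errno above -1000, and whose unique must echo the request's ID; error replies carry no payload (copy_out_args() rejects anything else). A matching userspace helper, sketched under the assumption of a raw /dev/fuse descriptor:

#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

static int send_reply(int fd, uint64_t unique, int error,
		      const void *arg, size_t argsize)
{
	struct fuse_out_header oh;
	struct iovec iov[2];

	memset(&oh, 0, sizeof(oh));
	oh.len = sizeof(oh) + (error ? 0 : argsize);
	oh.error = error;	/* 0 or -errno; error replies are header-only */
	oh.unique = unique;	/* must match in.h.unique from the request */

	iov[0].iov_base = &oh;
	iov[0].iov_len = sizeof(oh);
	iov[1].iov_base = (void *) arg;
	iov[1].iov_len = argsize;

	/* One writev() delivers exactly one reply. */
	return writev(fd, iov, error ? 1 : 2) < 0 ? -1 : 0;
}

Using a single writev() keeps the header and payload together, since fuse_dev_write() parses exactly one reply per system call and checks oh.len against the byte count of that call.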
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}
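fuse_abort_conn() is also reachable from userspace through the fusectl filesystem's per-connection abort file (see fs/fuse/control.c). A minimal sketch, assuming fusectl is mounted at /sys/fs/fuse/connections and conn is the connection's device number as listed there:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int abort_fuse_conn(unsigned conn)
{
	char path[64];
	int fd;

	/* Assumed mount point of fusectl; the directory name is the
	 * connection's device number. */
	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%u/abort", conn);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	/* Any write aborts the connection; outstanding requests then
	 * fail with -ECONNABORTED, matching end_requests() above. */
	(void) write(fd, "1", 1);
	close(fd);
	return 0;
}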
