
file.c
Linux kernel source code (C)
Page 1 of 4
/* low-level mailbox read */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
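/*
 * A minimal user-space sketch of the ibox read path implemented above.
 * The mount point and context name below are hypothetical assumptions;
 * what the kernel code above does guarantee is that reads shorter than
 * 4 bytes fail with EINVAL, and that with O_NONBLOCK an empty mailbox
 * yields EAGAIN instead of sleeping in spufs_wait().
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int ibox_read_example(void)
{
	uint32_t data;
	ssize_t n;
	int fd;

	fd = open("/spu/my_ctx/ibox", O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return -1;

	n = read(fd, &data, sizeof(data));	/* must request >= 4 bytes */
	if (n == (ssize_t)sizeof(data))
		printf("ibox word: 0x%08x\n", data);
	else if (n < 0 && errno == EAGAIN)
		printf("mailbox empty\n");	/* the O_NONBLOCK case */

	close(fd);
	return 0;
}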
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
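/*
 * The matching user-space sketch for the wbox write path above, under
 * the same hypothetical mount point. Without O_NONBLOCK the write
 * blocks until at least one 32-bit word fits in the mailbox; the
 * return value counts only the words actually queued, in multiples of
 * 4 bytes.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int wbox_write_example(void)
{
	uint32_t words[2] = { 0x1, 0x2 };	/* payload for the SPE */
	ssize_t n;
	int fd;

	fd = open("/spu/my_ctx/wbox", O_WRONLY);	/* hypothetical path */
	if (fd < 0)
		return -1;

	n = write(fd, words, sizeof(words));
	if (n > 0)
		printf("queued %zd mailbox words\n", n / 4);

	close(fd);
	return 0;
}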
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
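/*
 * A user-space sketch for the signal-notification files handled above,
 * again with a hypothetical context path. A 4-byte write lands in the
 * SPE signal 1 register via ctx->ops->signal1_write(); the same file
 * can also be mmap()ed (MAP_SHARED is required, per the check in
 * spufs_signal1_mmap) for direct access to the register area.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int signal1_write_example(void)
{
	uint32_t data = 1;	/* value to deliver to signal 1 */
	int fd, ret = 0;

	fd = open("/spu/my_ctx/signal1", O_RDWR);	/* hypothetical path */
	if (fd < 0)
		return -1;

	if (write(fd, &data, sizeof(data)) != (ssize_t)sizeof(data))
		ret = -1;

	close(fd);
	return ret;
}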
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static u64 __##__get(void *data)					\
{									\
	struct spu_context *ctx = data;					\
	u64 ret;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		ret = __get(ctx);					\
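/*
 * The macro body continues on the next page. For orientation, a
 * hypothetical use of this wrapper (the names below are illustrative,
 * not taken from this file) would look like:
 *
 *	static u64 spufs_foo_get(struct spu_context *ctx)
 *	{
 *		return ctx->ops->foo_read(ctx);
 *	}
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_foo_ops, spufs_foo_get, NULL,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * which generates a __spufs_foo_get() that takes the context lock
 * around the call and then feeds both routines to
 * DEFINE_SIMPLE_ATTRIBUTE to build the file_operations.
 */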
