⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 file.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		spu_acquire_saved(ctx);					\
		ret = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		ret = __get(ctx);					\
									\
	return ret;							\
}									\
DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

/* Set the signal 1 notification type; takes the context lock itself. */
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

/*
 * Read the signal 1 notification type.  Locking is supplied by the
 * DEFINE_SPUFS_ATTRIBUTE wrapper (SPU_ATTR_ACQUIRE), so this only
 * forwards to the backend ops.
 */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

/* Set the signal 2 notification type; takes the context lock itself. */
static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

/*
 * Read the signal 2 notification type; locking done by the attribute
 * wrapper, as for signal 1 above.
 */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
/* Fault handler for the mss mapping: one 4k page of problem state. */
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state mss area [0x0000 - 0x0fff].
*/static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma){	if (!(vma->vm_flags & VM_SHARED))		return -EINVAL;	vma->vm_flags |= VM_IO | VM_PFNMAP;	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)				     | _PAGE_NO_CACHE | _PAGE_GUARDED);	vma->vm_ops = &spufs_mss_mmap_vmops;	return 0;}#else /* SPUFS_MMAP_4K */#define spufs_mss_mmap NULL#endif /* !SPUFS_MMAP_4K */static int spufs_mss_open(struct inode *inode, struct file *file){	struct spufs_inode_info *i = SPUFS_I(inode);	struct spu_context *ctx = i->i_ctx;	file->private_data = i->i_ctx;	mutex_lock(&ctx->mapping_lock);	if (!i->i_openers++)		ctx->mss = inode->i_mapping;	mutex_unlock(&ctx->mapping_lock);	return nonseekable_open(inode, file);}static intspufs_mss_release(struct inode *inode, struct file *file){	struct spufs_inode_info *i = SPUFS_I(inode);	struct spu_context *ctx = i->i_ctx;	mutex_lock(&ctx->mapping_lock);	if (!--i->i_openers)		ctx->mss = NULL;	mutex_unlock(&ctx->mapping_lock);	return 0;}static const struct file_operations spufs_mss_fops = {	.open	 = spufs_mss_open,	.release = spufs_mss_release,	.mmap	 = spufs_mss_mmap,};static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,					    unsigned long address){	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);}static struct vm_operations_struct spufs_psmap_mmap_vmops = {	.nopfn = spufs_psmap_mmap_nopfn,};/* * mmap support for full problem state area [0x00000 - 0x1ffff]. 
*/static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma){	if (!(vma->vm_flags & VM_SHARED))		return -EINVAL;	vma->vm_flags |= VM_IO | VM_PFNMAP;	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)				     | _PAGE_NO_CACHE | _PAGE_GUARDED);	vma->vm_ops = &spufs_psmap_mmap_vmops;	return 0;}static int spufs_psmap_open(struct inode *inode, struct file *file){	struct spufs_inode_info *i = SPUFS_I(inode);	struct spu_context *ctx = i->i_ctx;	mutex_lock(&ctx->mapping_lock);	file->private_data = i->i_ctx;	if (!i->i_openers++)		ctx->psmap = inode->i_mapping;	mutex_unlock(&ctx->mapping_lock);	return nonseekable_open(inode, file);}static intspufs_psmap_release(struct inode *inode, struct file *file){	struct spufs_inode_info *i = SPUFS_I(inode);	struct spu_context *ctx = i->i_ctx;	mutex_lock(&ctx->mapping_lock);	if (!--i->i_openers)		ctx->psmap = NULL;	mutex_unlock(&ctx->mapping_lock);	return 0;}static const struct file_operations spufs_psmap_fops = {	.open	 = spufs_psmap_open,	.release = spufs_psmap_release,	.mmap	 = spufs_psmap_mmap,};#if SPUFS_MMAP_4Kstatic unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,					  unsigned long address){	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);}static struct vm_operations_struct spufs_mfc_mmap_vmops = {	.nopfn = spufs_mfc_mmap_nopfn,};/* * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 
*/
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* Uncached, guarded mapping of hardware registers. */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

/*
 * Open the mfc file.  Only the process that owns the context may use
 * it, and only through a single inode reference; records the mapping
 * on first open.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* refuse additional opens while another reference is live */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

/* Release the mfc file: drop the mapping on last close. */
static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. 
*/
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* Wake any readers/writers sleeping on MFC state. */
	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

/*
 * Condition helper for the blocking read path: returns nonzero once a
 * waited-for tag group has completed, clearing those bits from
 * ctx->tagwait and reporting them through *status.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

/*
 * Read exactly 4 bytes of completed tag-group status.  Non-blocking
 * opens return -EAGAIN when nothing has completed; otherwise the
 * caller sleeps until a waited-for tag group finishes.
 */
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* only clear the bits that were actually seen */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Validate a user-supplied DMA command: known opcode, matching low-bit
 * alignment of LS and EA addresses, size-dependent alignment, bounded
 * size, user tag range, and class 0 only.  Returns 0 or -EIO.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/* alignment requirement grows with transfer element size;
	   size 0 means a full 16-byte-aligned transfer */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

/*
 * Condition helper for the blocking write path: try to queue the
 * command, retrying once after requesting tag-completion interrupts.
 * Returns 1 when done (result in *error), 0 to keep waiting.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

/*
 * Queue one MFC DMA command from user space.  The write must be
 * exactly sizeof(struct mfc_dma_command); on success the command's
 * tag is added to ctx->tagwait and the full size is returned.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

static unsigned int 
spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	/* writable when queue slots are free, readable when a waited-for
	   tag group has completed */
	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

/* Flush is currently a no-op; the real wait-for-completion is disabled. */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);
	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

/* Register/unregister for SIGIO delivery from spufs_mfc_callback(). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

/* Write the next program counter while holding the context lock. */
static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

/* Read the next program counter; locking done by the attribute wrapper. */
static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

/* Write the decrementer into the saved local-store context image. */
static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

/* Read the decrementer from the saved image; the attribute wrapper
 * holds the saved-context lock (SPU_ATTR_ACQUIRE_SAVED). */
static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire_saved(ctx);
	if (val)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -