⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ffb_drv.c

📁 内核linux2.4.20,可跟rtlinux3.2打补丁 组成实时linux系统,编译内核
💻 C
📖 第 1 页 / 共 2 页
字号:
			return -EFAULT;
	}
	/* Tail of the DRM version ioctl handler (its definition begins
	 * before this chunk): copy the filled-in version struct back out.
	 */
	ret = copy_to_user((drm_version_t *) arg, &version, sizeof(version));
	if (ret)
		ret = -EFAULT;
	return ret;
}

/* Reset the soft state of the DRM device to a pristine, just-opened
 * state.  Called from ffb_open() when the very first reference to the
 * device is taken.  Always returns 0.
 */
static int ffb_setup(drm_device_t *dev)
{
	int i;

	/* Zero all usage and statistics counters. */
	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);
	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	/* Empty every chain of the per-device magic hash table. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	/* No maps, no tracked VMAs, and no hardware lock page yet. */
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);

	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma            = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	/* Reset dev->buf to an empty ring: read and write pointers at the
	 * start, end marker DRM_BSZ bytes in, nobody waiting on it.
	 */
	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	return 0;
}

/* open(2) entry point: locate the ffb DRM device whose misc-device
 * minor matches this inode, attach the file to it via
 * drm_open_helper(), and run ffb_setup() on the device's first open.
 * Returns 0 on success, -EINVAL if no device matches the minor, or
 * the error from drm_open_helper().
 */
static int ffb_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev;
	int minor, i;
	int ret = 0;

	minor = MINOR(inode->i_rdev);

	/* The ffb private data sits immediately after the drm_device_t
	 * in each ffb_dev_table[] allocation; scan the table for the
	 * entry whose registered misc minor matches this inode.
	 */
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_dev_priv_t *ffb_priv;

		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);

		if (ffb_priv->miscdev.minor == minor)
			break;
	}

	if (i >= ffb_dev_table_size)
		return -EINVAL;

	dev = ffb_dev_table[i];
	if (!dev)
		return -EINVAL;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	ret = drm_open_helper(inode, filp, dev);
	if (!ret) {
atomic_inc(&dev->total_open);

		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			/* First opener: initialize the device soft state.
			 * Drop the count lock first — ffb_setup() need not
			 * run under it.
			 */
			spin_unlock(&dev->count_lock);
			return ffb_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return ret;
}

/* close(2) entry point.  If the closing process still holds the
 * hardware lock, its per-context hardware state is freed here so it
 * is not leaked.  The last close tears the whole device down via
 * ffb_takedown(), unless ioctls are still in flight (-EBUSY).
 */
static int ffb_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int ret = 0;

	lock_kernel();
	dev = priv->dev;
	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (dev->lock.hw_lock != NULL
	    && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1);
		int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
		int idx;

		/* We have to free up the rogue hw context state
		 * holding error or else we will leak it.
		 * (hw_state[] is indexed by context - 1.)
		 */
		idx = context - 1;
		if (fpriv->hw_state[idx] != NULL) {
			kfree(fpriv->hw_state[idx]);
			fpriv->hw_state[idx] = NULL;
		}
	}

	ret = drm_release(inode, filp);
	if (!ret) {
		atomic_inc(&dev->total_close);

		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			/* Last reference gone: refuse to tear down while
			 * ioctls are still executing or the device is
			 * marked blocked.
			 */
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			ret = ffb_takedown(dev);
			unlock_kernel();
			return ret;
		}
		spin_unlock(&dev->count_lock);
	}
	unlock_kernel();
	return ret;
}

/* ioctl dispatcher: validate the ioctl number against the
 * ffb_ioctls[] table, enforce the entry's root-only and
 * authentication requirements, then invoke its handler.
 */
static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;
	int		 ret;

	/* Count the ioctl as in-flight; ffb_release() refuses to tear
	 * the device down while dev->ioctl_count is non-zero.
	 */
	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= FFB_IOCTL_COUNT) {
		ret = -EINVAL;
	} else {
		ioctl	  = &ffb_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
DRM_DEBUG("no function\n");
			ret = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			    || (ioctl->auth_needed && !priv->authenticated)) {
			/* Permission check: some entries demand
			 * CAP_SYS_ADMIN, others an authenticated client.
			 */
			ret = -EACCES;
		} else {
			ret = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return ret;
}

/* Lock ioctl handler: acquire the heavyweight hardware lock for the
 * caller's context, sleeping interruptibly while it is contended.
 * On success the caller's pid is recorded as lock owner and a
 * hardware context switch is performed if needed (see continuation).
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for
 * the kernel context, -EINTR if the device vanished while waiting,
 * or -ERESTARTSYS on signal.
 */
static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
        drm_file_t        *priv	= filp->private_data;
        drm_device_t      *dev	= priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret	= 0;
        drm_lock_t        lock;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	/* User space may never lock on behalf of the kernel context. */
        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

	add_wait_queue(&dev->lock.lock_queue, &entry);
	for (;;) {
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock.hw_lock->lock,
				  lock.context)) {
			/* Record ownership and acquisition time. */
			dev->lock.pid       = current->pid;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->total_locks);
			break;  /* Got lock */
		}

		/* Contention: yield the CPU and sleep until woken by an
		 * unlock or interrupted by a signal.
		 */
		atomic_inc(&dev->total_sleeps);
		current->state = TASK_INTERRUPTIBLE;
		current->policy |= SCHED_YIELD;
		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&dev->lock.lock_queue, &entry);

        if (!ret) {
		/* Lock acquired: prepare a mask of the job-control stop
		 * signals so they can be blocked while the hardware lock
		 * is held (the block itself follows immediately).
		 */
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

		/* If the lock moved to a context other than the one the
		 * hardware last ran, switch hardware contexts now.
		 */
		if (dev->last_context != lock.context)
			ffb_context_switch(dev, dev->last_context, lock.context);
	}

        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

        return ret;
}

/* Unlock ioctl handler: release the hardware lock held by the
 * caller's context and wake any waiters.  Returns 0 on success,
 * -EFAULT on a bad user pointer, or -EINVAL for the kernel context.
 */
int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;
	unsigned int old, new, prev, ctx;
	int ret;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

	/* The kernel context cannot be unlocked from user space. */
	if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);

	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
*/
	dev->lock.pid = 0;

	/* Atomically replace the lock word with the bare context id via
	 * compare-and-swap, retrying until no other CPU raced us.
	 */
	{
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;

		do {
			old  = *plock;
			new  = ctx;
			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	}

	wake_up_interruptible(&dev->lock.lock_queue);

	/* Lock released: stop intercepting the job-control signals that
	 * were blocked while it was held.
	 */
	unblock_all_signals();
	return 0;
}

/* VMA operation tables supplied by the DRM core. */
extern struct vm_operations_struct drm_vm_ops;
extern struct vm_operations_struct drm_vm_shm_ops;
extern struct vm_operations_struct drm_vm_shm_lock_ops;

/* mmap(2) entry point: look the requested offset up in the device's
 * map list, enforce permission and size constraints, then establish
 * the mapping — I/O remap for framebuffer/register maps, shared-memory
 * vm_ops for _DRM_SHM maps.
 */
static int ffb_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map	= NULL;
	ffb_dev_priv_t	*ffb_priv;
	int		i, minor;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Find the ffb private data for this misc minor; it carries
	 * card_phys_base, needed below for FB/register mappings.
	 */
	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
	ffb_priv = NULL;
	for (i = 0; i < ffb_dev_table_size; i++) {
		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
		if (ffb_priv->miscdev.minor == minor)
			break;
	}
	if (i >= ffb_dev_table_size)
		return -EINVAL;

	/* We don't support/need dma mappings, so... */
	if (!VM_OFFSET(vma))
		return -EINVAL;

	/* Look the requested offset up in the device map list. */
	for (i = 0; i < dev->map_count; i++) {
		unsigned long off;

		map = dev->maplist[i];

		/* Ok, a little hack to make 32-bit apps work:
		 * match only on the low 32 bits of the map offset.
		 */
		off = (map->offset & 0xffffffff);
		if (off == VM_OFFSET(vma))
			break;
	}

	if (i >= dev->map_count)
		return -EINVAL;

	/* Restricted maps require CAP_SYS_ADMIN. */
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* The VMA must cover the map exactly. */
	if (map->size != (vma->vm_end - vma->vm_start))
		return -EINVAL;

	/* Set read-only attribute before mappings are created
	 * so it works for fb/reg maps too.
	 */
	if (map->flags & _DRM_READ_ONLY)
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		/* In order to handle 32-bit drm apps/xserver we
		 * play a trick.  The mappings only really specify
		 * the 32-bit offset from the cards 64-bit base
		 * address, and we just add in the base here.
*/
		vma->vm_flags |= VM_IO;
		if (io_remap_page_range(vma->vm_start,
					ffb_priv->card_phys_base + VM_OFFSET(vma),
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot, 0))
			return -EAGAIN;

		vma->vm_ops = &drm_vm_ops;
		break;

	case _DRM_SHM:
		if (map->flags & _DRM_CONTAINS_LOCK)
			vma->vm_ops = &drm_vm_shm_lock_ops;
		else {
			vma->vm_ops = &drm_vm_shm_ops;
			vma->vm_private_data = (void *) map;
		}

		/* Don't let this area swap.  Change when
		 * DRM_KERNEL advisory is supported.
		 */
		vma->vm_flags |= VM_LOCKED;
		break;

	default:
		return -EINVAL;	/* This should never happen. */
	};

	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
	vma->vm_file = filp; /* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

/* Translate a byte offset into the drm_map_t registered for it on the
 * file's device, or NULL if no map matches.  Helper for
 * ffb_get_unmapped_area().
 */
static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev;
	drm_map_t	*map;
	int		i;

	if (!priv || (dev = priv->dev) == NULL)
		return NULL;

	for (i = 0; i < dev->map_count; i++) {
		unsigned long uoff;

		map = dev->maplist[i];

		/* Ok, a little hack to make 32-bit apps work.
*/
		uoff = (map->offset & 0xffffffff);
		if (uoff == off)
			return map;
	}

	return NULL;
}

/* get_unmapped_area hook: choose a user virtual address for a mapping
 * of this file.  Unknown offsets fall back to the generic allocator;
 * framebuffer/register maps may use the architecture's framebuffer
 * placement; _DRM_SHM maps are slid so the user address agrees with
 * the kernel mapping of the buffer modulo SHMLBA (NOTE(review):
 * presumably to avoid cache aliasing where SHMLBA > PAGE_SIZE —
 * confirm against the architecture's SHMLBA definition).
 */
static unsigned long ffb_get_unmapped_area(struct file *filp, unsigned long hint, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
	unsigned long addr = -ENOMEM;

	if (!map)
		return get_unmapped_area(NULL, hint, len, pgoff, flags);

	if (map->type == _DRM_FRAME_BUFFER ||
	    map->type == _DRM_REGISTERS) {
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
		/* Let the architecture place framebuffer mappings. */
		addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
#else
		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
#endif
	} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
		/* Over-allocate by SHMLBA - PAGE_SIZE so there is room
		 * to slide the returned address forward until its offset
		 * within an SHMLBA window matches that of the kernel
		 * virtual address of the map.
		 */
		unsigned long slack = SHMLBA - PAGE_SIZE;

		addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);

		/* A page-aligned value means success (errors are small
		 * negative values, never page aligned).
		 */
		if (!(addr & ~PAGE_MASK)) {
			unsigned long kvirt = (unsigned long) map->handle;

			if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
				unsigned long koff, aoff;

				koff = kvirt & (SHMLBA - 1);
				aoff = addr & (SHMLBA - 1);
				if (koff < aoff)
					koff += SHMLBA;

				addr += (koff - aoff);
			}
		}
	} else {
		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
	}

	return addr;
}

module_init(ffb_init);
module_exit(ffb_cleanup);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -