📄 ffb_drv.c
字号:
atomic_set(&dev->ioctl_count, 0); atomic_set(&dev->vma_count, 0); dev->buf_use = 0; atomic_set(&dev->buf_alloc, 0); atomic_set(&dev->total_open, 0); atomic_set(&dev->total_close, 0); atomic_set(&dev->total_ioctl, 0); atomic_set(&dev->total_irq, 0); atomic_set(&dev->total_ctx, 0); atomic_set(&dev->total_locks, 0); atomic_set(&dev->total_unlocks, 0); atomic_set(&dev->total_contends, 0); atomic_set(&dev->total_sleeps, 0); for (i = 0; i < DRM_HASH_SIZE; i++) { dev->magiclist[i].head = NULL; dev->magiclist[i].tail = NULL; } dev->maplist = NULL; dev->map_count = 0; dev->vmalist = NULL; dev->lock.hw_lock = NULL; init_waitqueue_head(&dev->lock.lock_queue); dev->queue_count = 0; dev->queue_reserved = 0; dev->queue_slots = 0; dev->queuelist = NULL; dev->irq = 0; dev->context_flag = 0; dev->interrupt_flag = 0; dev->dma = 0; dev->dma_flag = 0; dev->last_context = 0; dev->last_switch = 0; dev->last_checked = 0; init_timer(&dev->timer); init_waitqueue_head(&dev->context_wait); dev->ctx_start = 0; dev->lck_start = 0; dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; dev->buf_async = NULL; init_waitqueue_head(&dev->buf_readers); init_waitqueue_head(&dev->buf_writers); return 0;}static int ffb_open(struct inode *inode, struct file *filp){ drm_device_t *dev; int minor, i; int ret = 0; minor = MINOR(inode->i_rdev); for (i = 0; i < ffb_dev_table_size; i++) { ffb_dev_priv_t *ffb_priv; ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1); if (ffb_priv->miscdev.minor == minor) break; } if (i >= ffb_dev_table_size) return -EINVAL; dev = ffb_dev_table[i]; if (!dev) return -EINVAL; DRM_DEBUG("open_count = %d\n", dev->open_count); ret = drm_open_helper(inode, filp, dev); if (!ret) { atomic_inc(&dev->total_open); spin_lock(&dev->count_lock); if (!dev->open_count++) { spin_unlock(&dev->count_lock); return ffb_setup(dev); } spin_unlock(&dev->count_lock); } return ret;}static int ffb_release(struct inode *inode, struct file *filp){ drm_file_t *priv = filp->private_data; drm_device_t *dev; int ret = 0; lock_kernel(); dev = priv->dev; DRM_DEBUG("open_count = %d\n", dev->open_count); if (dev->lock.hw_lock != NULL && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && dev->lock.pid == current->pid) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1); int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); int idx; /* We have to free up the rogue hw context state * holding error or else we will leak it. */ idx = context - 1; if (fpriv->hw_state[idx] != NULL) { kfree(fpriv->hw_state[idx]); fpriv->hw_state[idx] = NULL; } } ret = drm_release(inode, filp); if (!ret) { atomic_inc(&dev->total_close); spin_lock(&dev->count_lock); if (!--dev->open_count) { if (atomic_read(&dev->ioctl_count) || dev->blocked) { DRM_ERROR("Device busy: %d %d\n", atomic_read(&dev->ioctl_count), dev->blocked); spin_unlock(&dev->count_lock); unlock_kernel(); return -EBUSY; } spin_unlock(&dev->count_lock); ret = ffb_takedown(dev); unlock_kernel(); return ret; } spin_unlock(&dev->count_lock); } unlock_kernel(); return ret;}static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg){ int nr = DRM_IOCTL_NR(cmd); drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_ioctl_desc_t *ioctl; drm_ioctl_t *func; int ret; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->total_ioctl); ++priv->ioctl_count; DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", current->pid, cmd, nr, dev->device, priv->authenticated); if (nr >= FFB_IOCTL_COUNT) { ret = -EINVAL; } else { ioctl = &ffb_ioctls[nr]; func = ioctl->func; if (!func) { DRM_DEBUG("no function\n"); ret = -EINVAL; } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) || (ioctl->auth_needed && !priv->authenticated)) { ret = -EACCES; } else { ret = (func)(inode, filp, cmd, arg); } } atomic_dec(&dev->ioctl_count); return ret;}static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; DECLARE_WAITQUEUE(entry, current); int ret = 0; drm_lock_t lock; ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)); if (ret) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock.context, current->pid, dev->lock.hw_lock->lock, lock.flags); add_wait_queue(&dev->lock.lock_queue, &entry); for (;;) { if (!dev->lock.hw_lock) { /* Device has been unregistered */ ret = -EINTR; break; } if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) { dev->lock.pid = current->pid; dev->lock.lock_time = jiffies; atomic_inc(&dev->total_locks); break; /* Got lock */ } /* Contention */ atomic_inc(&dev->total_sleeps); current->state = TASK_INTERRUPTIBLE; current->policy |= SCHED_YIELD; schedule(); if (signal_pending(current)) { ret = -ERESTARTSYS; break; } } current->state = TASK_RUNNING; remove_wait_queue(&dev->lock.lock_queue, &entry); if (!ret && (dev->last_context != lock.context)) ffb_context_switch(dev, dev->last_context, lock.context); DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock"); return ret;}int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_lock_t lock; unsigned int old, new, prev, ctx; int ret; ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)); if (ret) return -EFAULT; if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", current->pid, lock.context); return -EINVAL; } DRM_DEBUG("%d frees lock (%d holds)\n", lock.context, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); atomic_inc(&dev->total_unlocks); if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) atomic_inc(&dev->total_contends); /* We no longer really hold it, but if we are the next * agent to request it then we should just be able to * take it immediately and not eat the ioctl. */ dev->lock.pid = 0; { __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; do { old = *plock; new = ctx; prev = cmpxchg(plock, old, new); } while (prev != old); } wake_up_interruptible(&dev->lock.lock_queue); return 0;}static void align_fb_mapping(struct vm_area_struct *vma){ unsigned long j, alignment; j = vma->vm_end - vma->vm_start; for (alignment = (4 * 1024 * 1024); alignment > PAGE_SIZE; alignment >>= 3) if (j >= alignment) break; if (alignment > PAGE_SIZE) { j = alignment; alignment = j - (vma->vm_start & (j - 1)); if (alignment != j) { struct vm_area_struct *vmm = find_vma(current->mm,vma->vm_start); if (!vmm || vmm->vm_start >= vma->vm_end + alignment) { vma->vm_start += alignment; vma->vm_end += alignment; } } }}/* The problem here is, due to virtual cache aliasing, * we must make sure the shared memory area lands in the * same dcache line for both the kernel and all drm clients. */static void align_shm_mapping(struct vm_area_struct *vma, unsigned long kvirt){ kvirt &= PAGE_SIZE; if ((vma->vm_start & PAGE_SIZE) != kvirt) { struct vm_area_struct *vmm = find_vma(current->mm, vma->vm_start); if (!vmm || vmm->vm_start >= vma->vm_end + PAGE_SIZE) { vma->vm_start += PAGE_SIZE; vma->vm_end += PAGE_SIZE; } }}extern struct vm_operations_struct drm_vm_ops;extern struct vm_operations_struct drm_vm_shm_ops;extern struct vm_operations_struct drm_vm_shm_lock_ops;static int ffb_mmap(struct file *filp, struct vm_area_struct *vma){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_map_t *map = NULL; ffb_dev_priv_t *ffb_priv; int i, minor; DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", vma->vm_start, vma->vm_end, VM_OFFSET(vma)); minor = MINOR(filp->f_dentry->d_inode->i_rdev); ffb_priv = NULL; for (i = 0; i < ffb_dev_table_size; i++) { ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1); if (ffb_priv->miscdev.minor == minor) break; } if (i >= ffb_dev_table_size) return -EINVAL; /* We don't support/need dma mappings, so... */ if (!VM_OFFSET(vma)) return -EINVAL; for (i = 0; i < dev->map_count; i++) { unsigned long off; map = dev->maplist[i]; /* Ok, a little hack to make 32-bit apps work. */ off = (map->offset & 0xffffffff); if (off == VM_OFFSET(vma)) break; } if (i >= dev->map_count) return -EINVAL; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) return -EPERM; if (map->size != (vma->vm_end - vma->vm_start)) return -EINVAL; /* Set read-only attribute before mappings are created * so it works for fb/reg maps too. */ if (map->flags & _DRM_READ_ONLY) vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect( __pte(pgprot_val(vma->vm_page_prot))))); switch (map->type) { case _DRM_FRAME_BUFFER: align_fb_mapping(vma); /* FALLTHROUGH */ case _DRM_REGISTERS: /* In order to handle 32-bit drm apps/xserver we * play a trick. The mappings only really specify * the 32-bit offset from the cards 64-bit base * address, and we just add in the base here. */ vma->vm_flags |= VM_IO; if (io_remap_page_range(vma->vm_start, ffb_priv->card_phys_base + VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot, 0)) return -EAGAIN; vma->vm_ops = &drm_vm_ops; break; case _DRM_SHM: align_shm_mapping(vma, (unsigned long)dev->lock.hw_lock); if (map->flags & _DRM_CONTAINS_LOCK) vma->vm_ops = &drm_vm_shm_lock_ops; else { vma->vm_ops = &drm_vm_shm_ops; vma->vm_private_data = (void *) map; } /* Don't let this area swap. Change when * DRM_KERNEL advisory is supported. */ vma->vm_flags |= VM_LOCKED; break; default: return -EINVAL; /* This should never happen. */ }; vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open(vma); return 0;}module_init(ffb_init);module_exit(ffb_cleanup);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -