
drm_bufs.c (linux-2.6.15.6, C)
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
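For reference, the sizing arithmetic at the top of this function (order, size, page_order, total) is shared by all of the addbufs variants. A standalone sketch with hypothetical numbers, assuming a 4 KB page (PAGE_SHIFT == 12); this is not kernel code, just the same arithmetic in user space:

/* Sketch of the buffer-sizing arithmetic above; example values only. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Same algorithm as drm_order() later in this file:
 * smallest n with 2^n >= size. */
static int order_of(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;
	if (size & (size - 1))
		++order;
	return order;
}

int main(void)
{
	unsigned long requested = 20000;	/* hypothetical request->size */
	int order = order_of(requested);	/* 15, since 2^15 = 32768 >= 20000 */
	unsigned long size = 1UL << order;	/* 32768 */
	int page_order = order > PAGE_SHIFT ? order - PAGE_SHIFT : 0;	/* 3 */
	unsigned long total = PAGE_SIZE << page_order;	/* 32768 */

	printf("order=%d size=%lu page_order=%d total=%lu aligned=%lu\n",
	       order, size, page_order, total, PAGE_ALIGN(size));
	return 0;
}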
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
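From user space this dispatcher is reached through the DRM_IOCTL_ADD_BUFS ioctl, which also requires an authenticated master (typically root). A minimal sketch, assuming a drm.h of this era on the include path; the device node and request values are illustrative:

/* Sketch: requesting DMA buffers from user space. Header location varies
 * by install (libdrm also ships drm.h); /dev/dri/card0 is illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_buf_desc desc = {0};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	desc.count = 32;	/* ask for 32 buffers... */
	desc.size = 65536;	/* ...of 64 KB each */
	desc.flags = 0;		/* no _DRM_AGP_BUFFER etc.: the PCI path */

	/* Lands in drm_addbufs(), which picks drm_addbufs_pci() here.
	 * On success the kernel writes back the actual count and size. */
	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
		printf("got %d buffers of %d bytes\n", desc.count, desc.size);
	return 0;
}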
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
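Because drm_infobufs() fills request.list only when request.count is at least the number of populated size orders, a client typically calls it twice: once with count zero to learn how many entries exist, then again with a large-enough list. A sketch of that pattern, with an illustrative device path and minimal error handling:

/* Sketch: the two-call pattern drm_infobufs() expects from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_buf_info info;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));	/* count == 0: just ask how many */
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
		return 1;

	/* info.count now holds the number of populated size orders. */
	info.list = calloc(info.count, sizeof(*info.list));
	if (info.list && ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) == 0) {
		int i;

		for (i = 0; i < info.count; i++)
			printf("%d buffers of %d bytes\n",
			       info.list[i].count, info.list[i].size);
	}
	return 0;
}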
/**
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
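The matching user-space call passes a list of buffer indices (the idx values handed out by the mapping ioctl below). A sketch with hypothetical indices:

/* Sketch: returning two previously reserved buffers by index. The indices
 * would come from a prior drmDMA/DRM_IOCTL_MAP_BUFS step; illustrative. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	int indices[2] = { 0, 1 };	/* hypothetical buffer indices */
	struct drm_buf_free req;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	req.count = 2;
	req.list = indices;
	/* drm_freebufs() rejects out-of-range indices and buffers this
	 * file handle does not own (buf->filp != filp). */
	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req) ? 1 : 0;
}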
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on the
 * addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
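On the \todo in drm_order(): the shift loop is linear in the number of bits, and a count-leading-zeros primitive gives the same answer in constant time. Inside the kernel, fls()/ilog2() would serve; the user-space sketch below uses the GCC builtin and cross-checks against the original loop:

/* Sketch: ceil(log2(size)) via count-leading-zeros instead of a shift
 * loop. __builtin_clzl is a GCC/Clang builtin, undefined for 0, so the
 * size <= 1 case is handled up front. */
#include <assert.h>
#include <stdio.h>

static int fast_order(unsigned long size)
{
	if (size <= 1)
		return 0;
	/* bits(unsigned long) - clzl(size - 1) is the smallest n
	 * with 2^n >= size. */
	return (int)(8 * sizeof(unsigned long)) - __builtin_clzl(size - 1);
}

static int slow_order(unsigned long size)	/* same loop as drm_order() */
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;
	if (size & (size - 1))
		++order;
	return order;
}

int main(void)
{
	unsigned long s;

	for (s = 1; s < 100000; s++)
		assert(fast_order(s) == slow_order(s));
	printf("order(4096)=%d order(4097)=%d\n",
	       fast_order(4096), fast_order(4097));	/* 12 and 13 */
	return 0;
}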
