drm_bufs.c

来自「优龙2410linux2.6.8内核源代码」· C源文件 代码 · 共 1,305 行 · 第 1/3 页

H
1,305
字号
		buf->total   = alignment;		buf->order   = order;		buf->used    = 0;		buf->offset  = (dma->byte_count + offset);		buf->bus_address = agp_offset + offset;		buf->address = (void *)(agp_offset + offset + dev->sg->handle);		buf->next    = NULL;		buf->waiting = 0;		buf->pending = 0;		init_waitqueue_head( &buf->dma_wait );		buf->filp    = NULL;		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),					       DRM_MEM_BUFS );		if(!buf->dev_private) {			/* Set count correctly so we free the proper amount. */			entry->buf_count = count;			DRM(cleanup_buf_error)(entry);			up( &dev->struct_sem );			atomic_dec( &dev->buf_alloc );			return -ENOMEM;		}		memset( buf->dev_private, 0, buf->dev_priv_size );		DRM_DEBUG( "buffer %d @ %p\n",			   entry->buf_count, buf->address );		offset += alignment;		entry->buf_count++;		byte_count += PAGE_SIZE << page_order;	}	DRM_DEBUG( "byte_count: %d\n", byte_count );	temp_buflist = DRM(realloc)( dma->buflist,				     dma->buf_count * sizeof(*dma->buflist),				     (dma->buf_count + entry->buf_count)				     * sizeof(*dma->buflist),				     DRM_MEM_BUFS );	if(!temp_buflist) {		/* Free the entry because it isn't valid */		DRM(cleanup_buf_error)(entry);		up( &dev->struct_sem );		atomic_dec( &dev->buf_alloc );		return -ENOMEM;	}	dma->buflist = temp_buflist;	for ( i = 0 ; i < entry->buf_count ; i++ ) {		dma->buflist[i + dma->buf_count] = &entry->buflist[i];	}	dma->buf_count += entry->buf_count;	dma->byte_count += byte_count;	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );#if __HAVE_DMA_FREELIST	DRM(freelist_create)( &entry->freelist, entry->buf_count );	for ( i = 0 ; i < entry->buf_count ; i++ ) {		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );	}#endif	up( &dev->struct_sem );	request.count = entry->buf_count;	request.size = size;	if ( copy_to_user( argp, &request, sizeof(request) ) )		return -EFAULT;	dma->flags = 
_DRM_DMA_USE_SG;
	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __HAVE_SG */

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int DRM(addbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	/* Dispatch on the requested memory type; each branch is compiled in
	 * only when the corresponding build option is enabled, so a given
	 * driver build may support only a subset of these paths. */
#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( inode, filp, cmd, arg );
	else
#endif
#if __HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( inode, filp, cmd, arg );
	else
#endif
#if __HAVE_PCI_DMA
		return DRM(addbufs_pci)( inode, filp, cmd, arg );
#else
		/* No PCI DMA support compiled in and no other type matched. */
		return -EINVAL;
#endif
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int DRM(infobufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if ( !dma ) return -EINVAL;

	/* Refuse while another buffer allocation is in flight; otherwise
	 * freeze further allocations by bumping buf_use under count_lock. */
	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	/* First pass: count how many size orders actually have buffers. */
	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	/* Second pass: if the caller's list is big enough, copy one
	 * descriptor per non-empty order into user space. If it is too
	 * small, only the required count is reported back below. */
	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}

	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int DRM(markbufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );

	/* Bound-check the order before indexing dma->bufs[] with it. */
	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	/* Both marks must lie within [0, buf_count] for this order. */
	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark  = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int DRM(freebufs)( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );

	/* Each list entry is fetched individually from user space; an
	 * earlier buffer may already have been freed when a later entry
	 * fails validation. */
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		/* Only the file handle that reserved a buffer may free it. */
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		DRM(free_buffer)( dev, buf );
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on the
 * addbufs_pci() call.
 */
int DRM(mapbufs)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if ( !dma ) return -EINVAL;

	/* Refuse while an allocation is in progress; otherwise freeze
	 * further allocations by bumping buf_use under count_lock. */
	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
		     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
			/* AGP/SG buffers: mmap the dedicated buffer map. */
			drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

			/* mmap_sem changed from a plain semaphore to an
			 * rwsem after 2.4.2, hence the version gate. */
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
			/* PCI buffers: mmap the whole DMA byte range at
			 * offset 0. */
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		/* do_mmap() returns a negative errno encoded in the top
		 * of the address range; anything above -1024UL is an
		 * error code, not a mapping address. */
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		/* Describe every buffer to user space: index, size, a
		 * zeroed "used" field, and its client-virtual address. */
		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}
#endif /* __HAVE_DMA */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?