⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 savage_bci.c

📁 底层驱动开发
💻 C
📖 第 1 页 / 共 3 页
字号:
		/* NOTE(review): this chunk begins inside savage_do_init_bci()
		 * (page 1 of 3 of the file) — the function header and the
		 * earlier map lookups are above this view; confirm against
		 * the full source.  Every error path below unwinds via
		 * savage_do_cleanup_bci() before returning. */

		/* Optional AGP texture region: look up the map the client
		 * handed us; absence of the offset means "no AGP textures". */
		dev_priv->agp_textures =
			drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	/* Optional command-DMA region.  Rejected outright on Savage3D-series
	 * chips and when vertex DMA buffers already exist (the two DMA
	 * mechanisms are mutually exclusive here). */
	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
		/* The map type must match the DMA mode: AGP DMA needs a
		 * _DRM_AGP map (which must also be ioremapped so the CPU can
		 * write to it), PCI DMA needs a _DRM_CONSISTENT map. */
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return DRM_ERR(EINVAL);
			}
			drm_core_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return DRM_ERR(ENOMEM);
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(EINVAL);
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	/* Without a real command-DMA region, fall back to a locally
	 * allocated "fake" DMA buffer and the matching flush routine.
	 * The buffer is freed by savage_do_cleanup_bci(). */
	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
						      DRM_MEM_DRIVER);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return DRM_ERR(ENOMEM);
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	/* Per-context private SAREA lives at a client-supplied offset
	 * inside the shared SAREA mapping. */
	dev_priv->sarea_priv =
		(drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
				       init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		/* Chips up to Savage4 encode the tile format by pixel depth;
		 * later chips use the destination-tile format for both. */
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		/* Strides are in pixels: pitch in bytes / bytes-per-pixel. */
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
		back_stride  = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
		depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);

		/* Pack stride | bpp | tile-format into the front/back/depth
		 * buffer descriptors; block writes are disabled for all. */
		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
			(color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv-> back_bd =  back_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
			(color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
			(dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
			(depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	/* With a status page present, FIFO/event waits read the shadow
	 * status in memory; otherwise they poll hardware registers.
	 * Slot 1023 of the status page seeds the event-counter shadow. */
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
			(volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return DRM_ERR(ENOMEM);
	}

	if (savage_dma_init(dev_priv) <  0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return DRM_ERR(ENOMEM);
	}

	return 0;
}

/* Tear down BCI state set up by savage_do_init_bci().  Safe to call from
 * init's error paths: every free is guarded by a check of the pointer or
 * mode it belongs to.  Always returns 0. */
int savage_do_cleanup_bci(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		/* fake DMA was drm_alloc'ed above; release it */
		if (dev_priv->fake_dma.handle)
			drm_free(dev_priv->fake_dma.handle,
				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		/* undo the drm_core_ioremap done during init */
		drm_core_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_core_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	if (dev_priv->dma_pages)
		drm_free(dev_priv->dma_pages,
			 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
			 DRM_MEM_DRIVER);

	return 0;
}

/* ioctl: dispatch BCI init/cleanup requests from user space.
 * Requires the DRM lock; unknown init.func values yield EINVAL. */
static int savage_bci_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_init_t init;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
				 sizeof(init));

	switch (init.func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, &init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return DRM_ERR(EINVAL);
}

/* ioctl: emit an event with the requested flags and return its number to
 * user space.  The 16-bit event counter goes in the low half of count and
 * the current wrap count in the high half. */
static int savage_bci_event_emit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t event;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
				 sizeof(event));

	event.count = savage_bci_emit_event(dev_priv, event.flags);
	event.count |= dev_priv->event_wrap << 16;
	DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
			       event.count, sizeof(event.count));

	return 0;
}

/* ioctl: wait until the hardware has processed a previously emitted event.
 * Decodes the (wrap, counter) pair packed by savage_bci_event_emit and
 * compares it against the hardware's progress; only blocks (via wait_evnt)
 * when the event has not been reached yet. */
static int savage_bci_event_wait(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t event;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
				 sizeof(event));

	UPDATE_EVENT_COUNTER();
	/* read the hardware's 16-bit event progress: shadow status page if
	 * available, status register otherwise */
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--; /* hardware hasn't passed the last wrap yet */

	/* unpack the user-supplied event: low 16 bits counter, high bits
	 * wrap count (mirrors the packing in savage_bci_event_emit) */
	event_e = event.count & 0xffff;
	event_w = event.count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

/* Hand out up to d->request_count DMA buffers from the freelist, copying
 * each buffer's index and size to the user-space arrays in d.  Returns
 * EAGAIN when the freelist runs dry, EFAULT on copy-out failure;
 * d->granted_count tracks how many buffers were actually granted. */
static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
{
	drm_buf_t *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return DRM_ERR(EAGAIN);

		buf->filp = filp;

		if (DRM_COPY_TO_USER(&d->request_indices[i],
				     &buf->idx, sizeof(buf->idx)))
			return DRM_ERR(EFAULT);
		if (DRM_COPY_TO_USER(&d->request_sizes[i],
				     &buf->total, sizeof(buf->total)))
			return DRM_ERR(EFAULT);

		d->granted_count++;
	}
	return 0;
}

/* ioctl: drmDMA entry point.  Rejects send requests (this driver only
 * hands buffers out), validates the request count against the number of
 * buffers that exist, then fills the request via savage_bci_get_buffers
 * and copies the result back to user space. */
int savage_bci_buffers(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_dma_t d;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));

	/* Please don't send us buffers.
	 */
	if (d.send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d.send_count);
		return DRM_ERR(EINVAL);
	}

	/* We'll send you buffers.
	 */
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.request_count, dma->buf_count);
		return DRM_ERR(EINVAL);
	}

	d.granted_count = 0;

	if (d.request_count) {
		ret = savage_bci_get_buffers(filp, dev, &d);
	}

	DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));

	return ret;
}

/* Reclaim DMA buffers still owned by a departing client: any buffer held
 * by filp that is not linked into the freelist (next/prev both NULL) gets
 * aged with a freshly emitted SAVAGE_WAIT_3D event and put back on the
 * freelist, then the DRM core reclaims the rest. */
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
{
	drm_device_dma_t *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	/* nothing to do if DMA was never set up */
	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	/*i830_flush_queue(dev);*/

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->filp == filp && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			/* stamp the buffer so it is not reused before the
			 * hardware is done with it */
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	drm_core_reclaim_buffers(dev, filp);
}

/* Driver ioctl table: {handler, auth_needed, root_only} per entry. */
drm_ioctl_desc_t savage_ioctls[] = {
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -