📄 omap24xxcam.c
	if (!vid2_format) {
		/* use win and crop if it is for overlay */
		dispc_reg_out(cam, DISPC_VID_ACCU0(v), 0);
		dispc_reg_out(cam, DISPC_VID_ACCU1(v), 0);
		firhinc = (1024 * (crop->width - 1)) / (win->w.width - 1);
		if (firhinc < 1)
			firhinc = 1;
		else if (firhinc > 2047)
			firhinc = 2047;
		firvinc = (1024 * (crop->height - 1)) / (win->w.height - 1);
		if (firvinc < 1)
			firvinc = 1;
		else if (firvinc > 2047)
			firvinc = 2047;
		dispc_reg_out(cam, DISPC_VID_FIR(v), firhinc | (firvinc << 16));
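		/*
		 * Worked example with illustrative numbers (not taken from the
		 * hardware documentation): the video2 path below uses 0x400
		 * (1024) as the 1:1 increment, so cropping a 320x240 source
		 * into a 640x480 window gives firhinc = 1024*319/639 = 511 and
		 * firvinc = 1024*239/479 = 510, roughly half-pixel steps for a
		 * 2x upscale, while the 2047 clamp caps downscaling at just
		 * under 2x.
		 */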
		/* configure the target window on the display */
		vid_position_x = cam->gfx_position_x + win->w.left;
		vid_position_y = cam->gfx_position_y + win->w.top;
		vid_size = (((win->w.width - 1)
				<< DISPC_VID_SIZE_VIDSIZEX_SHIFT)
				& DISPC_VID_SIZE_VIDSIZEX)
			| (((win->w.height - 1)
				<< DISPC_VID_SIZE_VIDSIZEY_SHIFT)
				& DISPC_VID_SIZE_VIDSIZEY);
		/* configure the source window in the framebuffer */
		vid_picture_size = (((crop->width - 1)
				<< DISPC_VID_PICTURE_SIZE_VIDORGSIZEX_SHIFT)
				& DISPC_VID_PICTURE_SIZE_VIDORGSIZEX)
			| (((crop->height - 1)
				<< DISPC_VID_PICTURE_SIZE_VIDORGSIZEY_SHIFT)
				& DISPC_VID_PICTURE_SIZE_VIDORGSIZEY);
		dispc_reg_out(cam, DISPC_VID_ROW_INC(v),
			      1 + pix->bytesperline - crop->width * 2);
	} else {
		/* video 2 layer configuration */
		struct v4l2_framebuffer *fbuf = &cam->fbuf;
		int row_inc;

		/* in video2 layer we won't be down or up scaling */
		dispc_reg_out(cam, DISPC_VID_FIR(v), 0x400 | (0x400 << 16));
		if (pix->width + vid2_format->left > fbuf->fmt.width) {
			vid_position_x = fbuf->fmt.width < pix->width ? 0 :
				cam->gfx_position_x + fbuf->fmt.width
				- pix->width;
		} else {
			vid_position_x = cam->gfx_position_x
				+ vid2_format->left;
		}
		if (pix->height + vid2_format->top > fbuf->fmt.height) {
			vid_position_y = fbuf->fmt.height < pix->height ? 0 :
				cam->gfx_position_y + fbuf->fmt.height
				- pix->height;
		} else {
			vid_position_y = cam->gfx_position_y
				+ vid2_format->top;
		}
		vid_size = ((((pix->width > fbuf->fmt.width ?
				fbuf->fmt.width : pix->width) - 1)
				<< DISPC_VID_SIZE_VIDSIZEX_SHIFT)
				& DISPC_VID_SIZE_VIDSIZEX)
			| ((((pix->height > fbuf->fmt.height ?
				fbuf->fmt.height : pix->height) - 1)
				<< DISPC_VID_SIZE_VIDSIZEY_SHIFT)
				& DISPC_VID_SIZE_VIDSIZEY);
		vid_picture_size = vid_size;
		row_inc = ((pix->width
				- ((vid_size >> DISPC_VID_SIZE_VIDSIZEX_SHIFT)
				& DISPC_VID_SIZE_VIDSIZEX)) * 2) - 1;
		/* we are subtracting 1 instead of adding because vid_size
		 * is 1 less than the pix width
		 */
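		/*
		 * Illustrative example (numbers invented for clarity): with a
		 * 640-pixel-wide capture shown on a 480-pixel-wide panel, the
		 * VIDSIZEX field holds 479, so
		 * row_inc = (640 - 479) * 2 - 1 = 321, i.e. the 2 * 160 bytes
		 * of hidden pixels to skip per line plus the +1 used by the
		 * overlay branch above, assuming the 2 bytes/pixel format
		 * that the "* 2" implies.
		 */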
		dispc_reg_out(cam, DISPC_VID_ROW_INC(v), row_inc);
	}

	vid_position = ((vid_position_x << DISPC_VID_POSITION_VIDPOSX_SHIFT)
			& DISPC_VID_POSITION_VIDPOSX)
		| ((vid_position_y << DISPC_VID_POSITION_VIDPOSY_SHIFT)
			& DISPC_VID_POSITION_VIDPOSY);
	dispc_reg_out(cam, DISPC_VID_POSITION(v), vid_position);
	dispc_reg_out(cam, DISPC_VID_SIZE(v), vid_size);
	dispc_reg_out(cam, DISPC_VID_PICTURE_SIZE(v), vid_picture_size);
	dispc_reg_out(cam, DISPC_VID_PIXEL_INC(v), 1);

	if (vid2_format) {
		unsigned long vid2_base_phys;

		vid2_base_phys = cam->video2_base_phys;
		/* + pix->bytesperline*crop->top + crop->left*2; */
		dispc_reg_out(cam, DISPC_VID_BA0(v), vid2_base_phys);
		dispc_reg_out(cam, DISPC_VID_BA1(v), vid2_base_phys);
		vid_attributes = dispc_reg_merge(cam, DISPC_VID_ATTRIBUTES(v),
			DISPC_VID_ATTRIBUTES_ENABLE,
			DISPC_VID_ATTRIBUTES_ENABLE);
		if (vid_attributes & DISPC_VID_ATTRIBUTES_VIDCHANNELOUT) {
			/* digital output */
			dispc_reg_merge(cam, DISPC_CONTROL,
					DISPC_CONTROL_GODIGITAL,
					DISPC_CONTROL_GODIGITAL);
		} else {
			/* LCD */
			dispc_reg_merge(cam, DISPC_CONTROL,
					DISPC_CONTROL_GOLCD,
					DISPC_CONTROL_GOLCD);
		}
	}
}

/* -------------------------------------------------------------------------- */

/* Start a DMA transfer from the camera to memory.
 * Returns zero if the transfer was successfully started, or non-zero if all
 * DMA channels are already in use or starting is currently inhibited.
 */
static int
omap24xxcam_dma_start(struct omap24xxcam_device *cam, dma_addr_t start,
		      unsigned long len, dma_callback_t callback, void *arg)
{
	unsigned long irqflags;
	int dmach;
	void (*dma_notify)(struct omap24xxcam_device *cam);

	spin_lock_irqsave(&cam->dma_lock, irqflags);

	if (!cam->free_dmach || cam->dma_stop) {
		spin_unlock_irqrestore(&cam->dma_lock, irqflags);
		return -EBUSY;
	}

	dmach = cam->next_dmach;

	cam->camdma[dmach].callback = callback;
	cam->camdma[dmach].arg = arg;

	camdma_reg_out(cam, CAMDMA_CCR(dmach),
		       CAMDMA_CCR_SEL_SRC_DST_SYNC | CAMDMA_CCR_BS |
		       CAMDMA_CCR_DST_AMODE_POST_INC |
		       CAMDMA_CCR_SRC_AMODE_POST_INC |
		       CAMDMA_CCR_FS | CAMDMA_CCR_SYNCHRO_CAMERA);
	camdma_reg_out(cam, CAMDMA_CLNK_CTRL(dmach), 0);
	camdma_reg_out(cam, CAMDMA_CEN(dmach), len);
	camdma_reg_out(cam, CAMDMA_CFN(dmach), 1);
	camdma_reg_out(cam, CAMDMA_CSDP(dmach),
		       CAMDMA_CSDP_WRITE_MODE_POSTED |
		       CAMDMA_CSDP_DST_BURST_EN_16 | CAMDMA_CSDP_DST_PACKED |
		       CAMDMA_CSDP_SRC_BURST_EN_16 | CAMDMA_CSDP_SRC_PACKED |
		       CAMDMA_CSDP_DATA_TYPE_8BITS);
	camdma_reg_out(cam, CAMDMA_CSSA(dmach), 0);
	camdma_reg_out(cam, CAMDMA_CDSA(dmach), start);
	camdma_reg_out(cam, CAMDMA_CSEI(dmach), 0);
	camdma_reg_out(cam, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
	camdma_reg_out(cam, CAMDMA_CDEI(dmach), 0);
	camdma_reg_out(cam, CAMDMA_CDFI(dmach), 0);
	camdma_reg_out(cam, CAMDMA_CSR(dmach),
		       CAMDMA_CSR_MISALIGNED_ERR | CAMDMA_CSR_SUPERVISOR_ERR |
		       CAMDMA_CSR_SECURE_ERR | CAMDMA_CSR_TRANS_ERR |
		       CAMDMA_CSR_BLOCK | CAMDMA_CSR_DROP);
	camdma_reg_out(cam, CAMDMA_CICR(dmach),
		       CAMDMA_CICR_MISALIGNED_ERR_IE |
		       CAMDMA_CICR_SUPERVISOR_ERR_IE |
		       CAMDMA_CICR_SECURE_ERR_IE | CAMDMA_CICR_TRANS_ERR_IE |
		       CAMDMA_CICR_BLOCK_IE | CAMDMA_CICR_DROP_IE);

	/* We're ready to start the DMA transfer. */
	if (cam->free_dmach < NUM_CAMDMA_CHANNELS) {
		/* A transfer is already in progress, so try to chain to it. */
		int prev_dmach, ch;

		if (dmach == 0)
			prev_dmach = NUM_CAMDMA_CHANNELS - 1;
		else
			prev_dmach = dmach - 1;
		camdma_reg_out(cam, CAMDMA_CLNK_CTRL(prev_dmach),
			       CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
		/* Did we chain the DMA transfer before the previous one
		 * finished?
		 */
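		/*
		 * Worked example with invented numbers: if NUM_CAMDMA_CHANNELS
		 * were 4, next_dmach were 2 and free_dmach were 3, the single
		 * in-flight transfer would be on channel 1, the new transfer
		 * sits on channel 2 and has just been linked behind channel 1,
		 * and the scan below starts at channel (2 + 3) % 4 = 1 looking
		 * for a channel that is still enabled.
		 */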
		ch = (dmach + cam->free_dmach) % NUM_CAMDMA_CHANNELS;
		while (!(camdma_reg_in(cam, CAMDMA_CCR(ch))
			 & CAMDMA_CCR_ENABLE)) {
			if (ch == dmach) {
				/* The previous transfer has ended and this one
				 * hasn't started, so we must not have chained
				 * to the previous one in time.  We'll have to
				 * start it now.
				 */
				camdma_reg_out(cam, CAMDMA_CCR(dmach),
					CAMDMA_CCR_SEL_SRC_DST_SYNC |
					CAMDMA_CCR_BS |
					CAMDMA_CCR_DST_AMODE_POST_INC |
					CAMDMA_CCR_SRC_AMODE_POST_INC |
					CAMDMA_CCR_ENABLE |
					CAMDMA_CCR_FS |
					CAMDMA_CCR_SYNCHRO_CAMERA);
				break;
			} else
				ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
		}
	} else {
		/* No transfer is in progress, so we'll just start this one
		 * now.
		 */
		camdma_reg_out(cam, CAMDMA_CCR(dmach),
			       CAMDMA_CCR_SEL_SRC_DST_SYNC | CAMDMA_CCR_BS |
			       CAMDMA_CCR_DST_AMODE_POST_INC |
			       CAMDMA_CCR_SRC_AMODE_POST_INC |
			       CAMDMA_CCR_ENABLE |
			       CAMDMA_CCR_FS | CAMDMA_CCR_SYNCHRO_CAMERA);
	}

	cam->next_dmach = (cam->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
	cam->free_dmach--;

	dma_notify = cam->dma_notify;
	cam->dma_notify = NULL;

	spin_unlock_irqrestore(&cam->dma_lock, irqflags);

	if (dma_notify)
		(*dma_notify)(cam);

	return 0;
}

/* DMA completion routine for the scatter-gather DMA fragments. */
static void
omap24xxcam_sg_dma_callback(struct omap24xxcam_device *cam, unsigned long csr,
			    void *arg)
{
	int sgslot = (int) arg;
	struct sgdma_state *sgdma;
	const unsigned long csr_error = CAMDMA_CSR_MISALIGNED_ERR
		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;

	spin_lock(&cam->sg_lock);

	sgdma = cam->sgdma + sgslot;
	if (!sgdma->queued_sglist) {
		spin_unlock(&cam->sg_lock);
		printk(KERN_DEBUG CAM_NAME
		       ": sgdma completed when none queued!\n");
		return;
	}

	sgdma->csr |= csr;
	if (!--sgdma->queued_sglist) {
		/* Queue for this sglist is empty, so check to see if we're
		 * done.
		 */
		if ((sgdma->next_sglist == sgdma->sglen)
		    || (sgdma->csr & csr_error)) {
			dma_callback_t callback = sgdma->callback;
			void *arg = sgdma->arg;
			unsigned long sg_csr = sgdma->csr;

			/* All done with this sglist */
			cam->free_sgdma++;
			if (callback) {
				spin_unlock(&cam->sg_lock);
				(*callback)(cam, sg_csr, arg);
				return;
			}
		}
	}

	spin_unlock(&cam->sg_lock);
}

/* Process the scatter-gather DMA queue by starting queued transfers. */
static void
omap24xxcam_sg_dma_process(struct omap24xxcam_device *cam)
{
	unsigned long irqflags;
	int queued_sgdma, sgslot;
	struct sgdma_state *sgdma;
	const unsigned long csr_error = CAMDMA_CSR_MISALIGNED_ERR
		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;

	spin_lock_irqsave(&cam->sg_lock, irqflags);

	queued_sgdma = NUM_SG_DMA - cam->free_sgdma;
	sgslot = (cam->next_sgdma + cam->free_sgdma) % NUM_SG_DMA;
	while (queued_sgdma > 0) {
		sgdma = cam->sgdma + sgslot;
		while ((sgdma->next_sglist < sgdma->sglen) &&
		       !(sgdma->csr & csr_error)) {
			const struct scatterlist *sglist;

			sglist = sgdma->sglist + sgdma->next_sglist;
			/* try to start the next DMA transfer */
			if (omap24xxcam_dma_start(cam, sg_dma_address(sglist),
						  sg_dma_len(sglist),
						  omap24xxcam_sg_dma_callback,
						  (void *) sgslot)) {
				/* DMA start failed */
				spin_unlock_irqrestore(&cam->sg_lock,
						       irqflags);
				return;
			} else {
				/* DMA start was successful */
				sgdma->next_sglist++;
				sgdma->queued_sglist++;
			}
		}
		queued_sgdma--;
		sgslot = (sgslot + 1) % NUM_SG_DMA;
	}

	spin_unlock_irqrestore(&cam->sg_lock, irqflags);
}

/* Queue a scatter-gather DMA transfer from the camera to memory.
 * Returns zero if the transfer was successfully queued, or
 * non-zero if all of the scatter-gather slots are already in use.
 */
static int
omap24xxcam_sg_dma_queue(struct omap24xxcam_device *cam,
			 const struct scatterlist *sglist, int sglen,
			 dma_callback_t callback, void *arg)
{
	unsigned long irqflags;
	struct sgdma_state *sgdma;

	if ((sglen < 0) || ((sglen > 0) && !sglist))
		return -EINVAL;

	spin_lock_irqsave(&cam->sg_lock, irqflags);

	if (!cam->free_sgdma) {
		spin_unlock_irqrestore(&cam->sg_lock, irqflags);
		return -EBUSY;
	}

	sgdma = cam->sgdma + cam->next_sgdma;

	sgdma->sglist = sglist;
	sgdma->sglen = sglen;
	sgdma->next_sglist = 0;
	sgdma->queued_sglist = 0;
	sgdma->csr = 0;
	sgdma->callback = callback;
	sgdma->arg = arg;

	cam->next_sgdma = (cam->next_sgdma + 1) % NUM_SG_DMA;
	cam->free_sgdma--;

	spin_unlock_irqrestore(&cam->sg_lock, irqflags);

	omap24xxcam_sg_dma_process(cam);

	return 0;
}
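
/*
 * Illustrative usage sketch (not from the original driver): one plausible way
 * a capture path could hand an already DMA-mapped scatterlist to
 * omap24xxcam_sg_dma_queue().  The helper and callback names below are
 * hypothetical and the block is compiled out.
 */
#if 0
static void example_capture_done(struct omap24xxcam_device *cam,
				 unsigned long csr, void *arg)
{
	/* Called once per queued sglist, after its last queued fragment
	 * completes or after a DMA error is reported in csr.
	 */
}

static int example_queue_buffer(struct omap24xxcam_device *cam,
				struct scatterlist *sglist, int sglen)
{
	/* sglist is assumed to have been mapped with dma_map_sg() already;
	 * queuing fails with -EBUSY once all NUM_SG_DMA slots are in use.
	 */
	return omap24xxcam_sg_dma_queue(cam, sglist, sglen,
					example_capture_done, NULL);
}
#endif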

/* Abort all chained DMA transfers.  After all transfers have been aborted and
 * the DMA controller is idle, the completion routines for any aborted
 * transfers will be called in sequence.  The DMA controller may not be idle
 * after this routine completes, because the completion routines might start
 * new transfers.
 */
static void
omap24xxcam_dma_abort(struct omap24xxcam_device *cam, unsigned long csr)
{
	unsigned long irqflags;
	int dmach, i, free_dmach;
	dma_callback_t callback;
	void *arg;

	spin_lock_irqsave(&cam->dma_lock, irqflags);

	/* stop any DMA transfers in progress */
	dmach = (cam->next_dmach + cam->free_dmach) % NUM_CAMDMA_CHANNELS;
	for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
		/* mask all interrupts from this channel */
		camdma_reg_out(cam, CAMDMA_CICR(dmach), 0);
		/* unlink this channel */
		camdma_reg_merge(cam, CAMDMA_CLNK_CTRL(dmach), 0,
				 CAMDMA_CLNK_CTRL_ENABLE_LNK);
		/* disable this channel */
		camdma_reg_merge(cam, CAMDMA_CCR(dmach), 0,
				 CAMDMA_CCR_ENABLE);
		dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
	}

	/* We have to be careful here because the callback routine might start
	 * a new DMA transfer, and we only want to abort transfers that were
	 * started before this routine was called.
	 */
	free_dmach = cam->free_dmach;
	while ((cam->free_dmach < NUM_CAMDMA_CHANNELS) &&
	       (free_dmach < NUM_CAMDMA_CHANNELS)) {
		dmach = (cam->next_dmach + cam->free_dmach)
			% NUM_CAMDMA_CHANNELS;
		callback = cam->camdma[dmach].callback;
		arg = cam->camdma[dmach].arg;
		cam->free_dmach++;
		free_dmach++;
		if (callback) {
			/* leave interrupts disabled during callback */
			spin_unlock(&cam->dma_lock);
			(*callback)(cam, csr, arg);
			spin_lock(&cam->dma_lock);
		}
	}

	spin_unlock_irqrestore(&cam->dma_lock, irqflags);
}

/* Abort all chained DMA transfers.  After all transfers have been aborted and
 * the DMA controller is idle, the completion routines for any aborted
 * transfers will be called in sequence.  If the completion routines attempt
 * to start a new DMA transfer it will fail, so the DMA controller will be
 * idle after this routine completes.
 */
static void
omap24xxcam_dma_stop(struct omap24xxcam_device *cam, unsigned long csr)
{
	unsigned long irqflags;

	spin_lock_irqsave(&cam->dma_lock, irqflags);
	cam->dma_stop++;
	spin_unlock_irqrestore(&cam->dma_lock, irqflags);
	omap24xxcam_dma_abort(cam, csr);
	spin_lock_irqsave(&cam->dma_lock, irqflags);
	cam->dma_stop--;
	spin_unlock_irqrestore(&cam->dma_lock, irqflags);
}

/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
 * Any queued scatter-gather DMA transactions that have not yet been started
 * will remain queued.  The DMA controller will be idle after this routine
 * completes.  When the scatter-gather queue is restarted, the next
 * scatter-gather DMA transfer will begin at the start of a new transaction.
 */
static void
omap24xxcam_sg_dma_sync(struct omap24xxcam_device *cam, unsigned long csr)
{
	unsigned long irqflags;
	int sgslot;
	struct sgdma_state *sgdma;