📄 pxa_camera.c
	/* tail of start_dma_transfer(): kick off DMA channel 2 and mark DMA as running */
	}

	// start channel 2
	if ( camera_context->fifo2_descriptors_virtual ) {
		des_virtual = (pxa_dma_desc *)camera_context->fifo2_descriptors_virtual +
			block_id * camera_context->fifo2_num_descriptors;
		des_physical = (pxa_dma_desc *)camera_context->fifo2_descriptors_physical +
			block_id * camera_context->fifo2_num_descriptors;
		DDADR(camera_context->dma_channels[2]) = des_physical;
		DCSR(camera_context->dma_channels[2]) |= DCSR_RUN;
	}

#ifdef CONFIG_DPM
	camera_context->dma_started = 1;
#endif
}

void stop_dma_transfer( p_camera_context_t camera_context )
{
	int ch0, ch1, ch2;

	ch0 = camera_context->dma_channels[0];
	ch1 = camera_context->dma_channels[1];
	ch2 = camera_context->dma_channels[2];
	DCSR(ch0) &= ~DCSR_RUN;
	DCSR(ch1) &= ~DCSR_RUN;
	DCSR(ch2) &= ~DCSR_RUN;

#ifdef CONFIG_DPM
	camera_context->dma_started = 0;
#endif
}

int start_capture( p_camera_context_t camera_context, unsigned int block_id, unsigned int frames )
{
	int status;

	// clear ci fifo
	ci_reset_fifo();
	ci_clear_int_status(0xFFFFFFFF);

	// start dma
	start_dma_transfer(camera_context, block_id);

	// start capture
	status = camera_context->camera_functions->start_capture(camera_context, frames);
	return status;
}

/***********************************************************************
 *
 * Init/Deinit APIs
 *
 ***********************************************************************/
int camera_init( p_camera_context_t camera_context )
{
	int ret = 0;
	int i;

	// parameter check
	if (camera_context->buffer_virtual == NULL || camera_context->buffer_physical == NULL ||
	    camera_context->buf_size == 0)
		return STATUS_WRONG_PARAMETER;

	if (camera_context->dma_descriptors_virtual == NULL ||
	    camera_context->dma_descriptors_physical == NULL ||
	    camera_context->dma_descriptors_size == 0)
		return STATUS_WRONG_PARAMETER;

	if (camera_context->sensor_type > CAMERA_TYPE_MAX)
		return STATUS_WRONG_PARAMETER;

	if (camera_context->capture_input_format > CAMERA_IMAGE_FORMAT_MAX ||
	    camera_context->capture_output_format > CAMERA_IMAGE_FORMAT_MAX)
		return STATUS_WRONG_PARAMETER;

	// check the function dispatch table according to the sensor type
	if ( !camera_context->camera_functions )
		return STATUS_WRONG_PARAMETER;

	if ( !camera_context->camera_functions->init ||
	     !camera_context->camera_functions->deinit ||
	     !camera_context->camera_functions->set_capture_format ||
	     !camera_context->camera_functions->start_capture ||
	     !camera_context->camera_functions->stop_capture )
		return STATUS_WRONG_PARAMETER;

	// init context status
	for (i = 0; i < 3; i++)
		camera_context->dma_channels[i] = 0xFF;

	camera_context->fifo0_descriptors_virtual = 0;
	camera_context->fifo1_descriptors_virtual = 0;
	camera_context->fifo2_descriptors_virtual = 0;
	camera_context->fifo0_descriptors_physical = 0;
	camera_context->fifo1_descriptors_physical = 0;
	camera_context->fifo2_descriptors_physical = 0;

	camera_context->fifo0_num_descriptors = 0;
	camera_context->fifo1_num_descriptors = 0;
	camera_context->fifo2_num_descriptors = 0;

	camera_context->fifo0_transfer_size = 0;
	camera_context->fifo1_transfer_size = 0;
	camera_context->fifo2_transfer_size = 0;

	camera_context->block_number = 0;
	camera_context->block_size = 0;
	camera_context->block_header = 0;
	camera_context->block_tail = 0;

	//printk("\nyul before camera_gpio_init \n");
	// Enable hardware
	camera_gpio_init();

	//printk("\nyul before ci_init \n");
	// capture interface init
	ci_init();

	//printk("\nyul before camera_functions->init \n");
	// sensor init
	ret = camera_context->camera_functions->init(camera_context);
	if (ret)
		goto camera_init_err;

	camera_context->dma_channels[0] = ci_dma_y;
	camera_context->dma_channels[1] = ci_dma_cb;
	camera_context->dma_channels[2] = ci_dma_cr;

	// set capture format
	ret = camera_set_capture_format(camera_context);
	if (ret)
		goto camera_init_err;

	// set frame rate
	camera_set_capture_frame_rate(camera_context);

	return 0;

camera_init_err:
	camera_deinit(camera_context);
	return -1;
}
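/*
 * Example (illustration only, not part of the original driver): a minimal
 * sketch of how platform code might fill in the context fields checked by
 * camera_init() above. Field and constant names are taken from this file;
 * the example_* identifiers, the buffer arguments and the chosen formats are
 * hypothetical placeholders, and the exact field types come from the driver
 * header. sensor_type and the frame-rate fields would also be set as the
 * board requires.
 */
#if 0
static int example_open_camera(p_camera_context_t ctx,
			       void *buf_virt, void *buf_phys, unsigned int buf_size,
			       void *desc_virt, void *desc_phys, unsigned int desc_size)
{
	ctx->buffer_virtual           = buf_virt;	/* frame ring buffer */
	ctx->buffer_physical          = buf_phys;
	ctx->buf_size                 = buf_size;

	ctx->dma_descriptors_virtual  = desc_virt;	/* DMA descriptor pool */
	ctx->dma_descriptors_physical = desc_phys;
	ctx->dma_descriptors_size     = desc_size;

	ctx->capture_width            = 320;		/* illustrative geometry */
	ctx->capture_height           = 240;
	ctx->capture_input_format     = CAMERA_IMAGE_FORMAT_YCBCR422_PACKED;
	ctx->capture_output_format    = CAMERA_IMAGE_FORMAT_YCBCR422_PLANAR;

	ctx->camera_functions         = &example_sensor_functions;	/* hypothetical sensor dispatch table */

	return camera_init(ctx);	/* cleans up through camera_deinit() on failure */
}
#endif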
void camera_gpio_init()
{
	// set gpio for nw board - yul
	set_GPIO_mode( 27 | GPIO_ALT_FN_3_IN);		/* CIF_DD[0] */
	set_GPIO_mode( 114 | GPIO_ALT_FN_1_IN);		/* CIF_DD[1] */
	set_GPIO_mode( 116 | GPIO_ALT_FN_1_IN);		/* CIF_DD[2] */
	set_GPIO_mode( 115 | GPIO_ALT_FN_2_IN);		/* CIF_DD[3] */
	set_GPIO_mode( 95 | GPIO_ALT_FN_2_IN);		/* CIF_DD[4] */
	set_GPIO_mode( 94 | GPIO_ALT_FN_2_IN);		/* CIF_DD[5] */
	set_GPIO_mode( 93 | GPIO_ALT_FN_2_IN);		/* CIF_DD[6] */
	set_GPIO_mode( 108 | GPIO_ALT_FN_1_IN);		/* CIF_DD[7] */
	/* set_GPIO_mode( 107 | GPIO_ALT_FN_1_IN);	CIF_DD[8] */
	/* set_GPIO_mode( 106 | GPIO_ALT_FN_1_IN);	CIF_DD[9] */
	set_GPIO_mode( 42 | GPIO_ALT_FN_3_OUT);		/* CIF_MCLK */
	set_GPIO_mode( 26 | GPIO_ALT_FN_2_IN);		/* CIF_PCLK */
	set_GPIO_mode( 44 | GPIO_ALT_FN_3_IN);		/* CIF_LV */
	set_GPIO_mode( 43 | GPIO_ALT_FN_3_IN);		/* CIF_FV */

	// set GPIO 9, 10, 11, 12, 17 and 22 as outputs
	GPDR0 |= (1<<9 | 1<<10 | 1<<17 | 1<<22 | 1<<11 | 1<<12);
	//GPDR1 |= (1<<39 | 1<<40);
	GAFR0_L &= ~(1<<18 | 1<<19 | 1<<20 | 1<<21 | 1<<22 | 1<<23 | 1<<24 | 1<<25);
	//GAFR0_U &= ~(1<<2 | 1<<3 | 1<<12 | 1<<13);

	return;
}

int camera_deinit( p_camera_context_t camera_context )
{
	int ret = 0;

	ret = camera_context->camera_functions->deinit(camera_context);

	// capture interface deinit
	ci_deinit();

	return ret;
}

/***********************************************************************
 *
 * Capture APIs
 *
 ***********************************************************************/
// Set the image format
int camera_set_capture_format( p_camera_context_t camera_context )
{
	int ret;
	unsigned int frame_size;
	unsigned int block_number = 0;
	CI_IMAGE_FORMAT ci_input_format, ci_output_format;
	CI_MP_TIMING timing;

	// set capture interface
	if (camera_context->capture_input_format > CAMERA_IMAGE_FORMAT_MAX ||
	    camera_context->capture_output_format > CAMERA_IMAGE_FORMAT_MAX )
		return STATUS_WRONG_PARAMETER;

	//printk("\nyul camera_set_capture_format step1\n");
	ci_input_format = FORMAT_MAPPINGS[camera_context->capture_input_format];
	ci_output_format = FORMAT_MAPPINGS[camera_context->capture_output_format];
	//printk("\nyul ci_input_format = 0x%x,ci_output_format = 0x%x,CI_INVALID_FORMAT=0x%x\n",ci_input_format,ci_output_format,CI_INVALID_FORMAT);
	if (ci_input_format == CI_INVALID_FORMAT || ci_output_format == CI_INVALID_FORMAT)
		return STATUS_WRONG_PARAMETER;

	ci_set_image_format(ci_input_format, ci_output_format);

	timing.BFW = timing.BLW = 0;
	// yul modified
	//ci_configure_mp(camera_context->capture_width - 1, camera_context->capture_height - 1, &timing);
	ci_configure_ep(1);	// added by yul @2006-1-13 17:51

	//printk("\nyul camera_set_capture_format step2\n");
	// set sensor setting
	ret = camera_context->camera_functions->set_capture_format(camera_context);
	if (ret)
		return ret;

	//printk("\nyul camera_set_capture_format step3\n");
	// ring buffer init
	switch (camera_context->capture_output_format) {
	case CAMERA_IMAGE_FORMAT_RGB565:
		frame_size = camera_context->capture_width * camera_context->capture_height * 2;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;
	case CAMERA_IMAGE_FORMAT_YCBCR422_PACKED:
		frame_size = camera_context->capture_width * camera_context->capture_height * 2;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;

	case CAMERA_IMAGE_FORMAT_YCBCR422_PLANAR:
		frame_size = camera_context->capture_width * camera_context->capture_height * 2;
		camera_context->fifo0_transfer_size = frame_size / 2;
		camera_context->fifo1_transfer_size = frame_size / 4;
		camera_context->fifo2_transfer_size = frame_size / 4;
		break;

/* YUV 420 support - yul
	case CAMERA_IMAGE_FORMAT_YCBCR420_PLANAR:
		frame_size = camera_context->capture_width * camera_context->capture_height * 3 / 2;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = frame_size / 4;
		camera_context->fifo2_transfer_size = frame_size / 4;
		break;
*/

	// RGB666 support - JamesL
	case CAMERA_IMAGE_FORMAT_RGB666_PLANAR:
		frame_size = camera_context->capture_width * camera_context->capture_height * 4;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;

	case CAMERA_IMAGE_FORMAT_RGB666_PACKED:
		frame_size = camera_context->capture_width * camera_context->capture_height * 3;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;

	// RGB888 support - JamesL
	case CAMERA_IMAGE_FORMAT_RGB888_PLANAR:
		frame_size = camera_context->capture_width * camera_context->capture_height * 4;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;

	// RAW10 support - yul
	case CAMERA_IMAGE_FORMAT_RAW10:
		frame_size = camera_context->capture_width * camera_context->capture_height * 2;
		camera_context->fifo0_transfer_size = frame_size;
		camera_context->fifo1_transfer_size = 0;
		camera_context->fifo2_transfer_size = 0;
		break;

	default:
		return STATUS_WRONG_PARAMETER;
	}

	camera_context->block_size = frame_size;
	block_number = camera_context->buf_size / frame_size;
	//printk("\nyul set capture format:block_size = 0x%x\n",frame_size);
	//printk("\nyul set capture format:block_number = 0x%x\n",block_number);
	camera_context->block_number = block_number > MAX_BLOCK_NUM ? MAX_BLOCK_NUM : block_number;
	camera_context->block_header = camera_context->block_tail = 0;
	//printk("\nyul camera_set_capture_format step5\n");

	// generate dma descriptor chain
	ret = update_dma_chain(camera_context);
	//printk("\n yul camera_set_capture_format:over\n");
	if (ret)
		return -1;

	return 0;
}
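/*
 * Worked example (illustration only, not part of the original driver):
 * for a hypothetical 320x240 capture in CAMERA_IMAGE_FORMAT_YCBCR422_PLANAR,
 * frame_size = 320 * 240 * 2 = 153600 bytes per frame, split across the three
 * CI FIFOs as fifo0 (Y) = 76800, fifo1 (Cb) = 38400 and fifo2 (Cr) = 38400.
 * With a 1 MiB ring buffer, block_number = 1048576 / 153600 = 6 blocks,
 * capped at MAX_BLOCK_NUM.
 */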
// take a picture and copy it into the ring buffer
int camera_capture_still_image( p_camera_context_t camera_context, unsigned int block_id )
{
	int status;

	// init buffer status & capture
	camera_context->block_header = camera_context->block_tail = block_id;
	camera_context->capture_status = 0;
	status = start_capture( camera_context, block_id, 1 );

	return status;
}

// capture motion video and copy it to the ring buffer
int camera_start_video_capture( p_camera_context_t camera_context, unsigned int block_id )
{
	int status;

	// init buffer status & capture
	camera_context->block_header = camera_context->block_tail = block_id;
	camera_context->capture_status = CAMERA_STATUS_VIDEO_CAPTURE_IN_PROCESS;
	status = start_capture( camera_context, block_id, 0 );

	return status;
}

// disable motion video image capture
void camera_stop_video_capture( p_camera_context_t camera_context )
{
	int status;

	// stop capture
	status = camera_context->camera_functions->stop_capture(camera_context);

	// stop dma
	stop_dma_transfer(camera_context);

	// update the flag
	if ( !(camera_context->capture_status & CAMERA_STATUS_RING_BUFFER_FULL) )
		camera_context->capture_status &= ~CAMERA_STATUS_VIDEO_CAPTURE_IN_PROCESS;

	return;
}

/***********************************************************************
 *
 * Flow Control APIs
 *
 ***********************************************************************/
// continue capture image to next available buffer
void camera_continue_transfer( p_camera_context_t camera_context )
{
	// don't think we need this either. JR
	// continue transfer on next block
	start_dma_transfer( camera_context, camera_context->block_tail );
}

// Return 1: there is an available buffer, 0: buffer is full
int camera_next_buffer_available( p_camera_context_t camera_context )
{
	camera_context->block_header = (camera_context->block_header + 1) % camera_context->block_number;
	if (((camera_context->block_header + 1) % camera_context->block_number) != camera_context->block_tail) {
		return 1;
	}

	camera_context->capture_status |= CAMERA_STATUS_RING_BUFFER_FULL;
	return 0;
}

// The application supplies the FrameBufferID to the driver to tell it that the application has completed
// processing of the given frame buffer, and that buffer is now available for re-use.
void camera_release_frame_buffer( p_camera_context_t camera_context, unsigned int frame_buffer_id )
{
	camera_context->block_tail = (camera_context->block_tail + 1) % camera_context->block_number;

	// restart video capture only if video capture is in progress and space is available for image capture
	if ( (camera_context->capture_status & CAMERA_STATUS_RING_BUFFER_FULL) &&
	     (camera_context->capture_status & CAMERA_STATUS_VIDEO_CAPTURE_IN_PROCESS) ) {
		if (((camera_context->block_header + 2) % camera_context->block_number) != camera_context->block_tail) {
			camera_context->capture_status &= ~CAMERA_STATUS_RING_BUFFER_FULL;
			start_capture( camera_context, camera_context->block_tail, 0 );
		}
	}
}
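/*
 * Worked example (illustration only, not part of the original driver):
 * with block_number = 4, block_tail = 0 and block_header = 2, a call to
 * camera_next_buffer_available() advances block_header to 3; since
 * (3 + 1) % 4 == block_tail, CAMERA_STATUS_RING_BUFFER_FULL is set and 0 is
 * returned. A first camera_release_frame_buffer() then moves block_tail to 1,
 * which still fails the (block_header + 2) % block_number != block_tail test;
 * a second release moves block_tail to 2, the test passes, the full flag is
 * cleared and capture restarts at block_tail.
 */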
// Returns the FrameBufferID for the first filled frame
// Note: -1 represents buffer empty
int camera_get_first_frame_buffer_id( p_camera_context_t camera_context )
{
	// not sure if this routine makes any sense.. JR

	// check whether buffer is empty
	if ( (camera_context->block_header == camera_context->block_tail) &&
	     !(camera_context->capture_status & CAMERA_STATUS_RING_BUFFER_FULL) )
		return -1;

	// return the block header
	return camera_context->block_header;
}

// Returns the FrameBufferID for the last filled frame. This would be used if we were polling for image
// completion data, or we wanted to make sure there were no frames waiting for us to process.
// Note: -1 represents buffer empty
int camera_get_last_frame_buffer_id( p_camera_context_t camera_context )
{
	int ret;

	// check whether buffer is empty
	if ( (camera_context->block_header == camera_context->block_tail) &&
	     !(camera_context->capture_status & CAMERA_STATUS_RING_BUFFER_FULL) )
		return -1;

	// return the block before the block_tail
	ret = ( camera_context->block_tail + camera_context->block_number - 1 ) % camera_context->block_number;
	return ret;
}

/***********************************************************************
 *
 * Buffer Info APIs
 *
 ***********************************************************************/
// Return: the number of frame buffers allocated for use.
unsigned int camera_get_num_frame_buffers( p_camera_context_t camera_context )
{
	return camera_context->block_number;
}

// FrameBufferID is a number between 0 and N-1, where N is the total number of frame buffers in use.
// Returns the address of the given frame buffer. The application will call this once for each frame
// buffer at application initialization only.
void* camera_get_frame_buffer_addr( p_camera_context_t camera_context, unsigned int frame_buffer_id )
{
	return (void*)((unsigned)camera_context->buffer_virtual +
		       camera_context->block_size * frame_buffer_id);
}

// Return the block id
int camera_get_frame_buffer_id( p_camera_context_t camera_context, void* address )
{
	if (((unsigned)address >= (unsigned)camera_context->buffer_virtual) &&
	    ((unsigned)address <= (unsigned)camera_context->buffer_virtual + camera_context->buf_size)) {
		return ((unsigned)address - (unsigned)camera_context->buffer_virtual) / camera_context->block_size;
	}

	return -1;
}
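/*
 * Example (illustration only, not part of the original driver): a minimal
 * sketch of how the flow-control and buffer-info APIs above might be driven
 * for motion video, assuming camera_init() has already succeeded.
 * example_consume_frame() and example_video_loop() are hypothetical
 * placeholders; real code would wait on the capture interrupt instead of
 * spinning on an empty ring.
 */
#if 0
static void example_video_loop(p_camera_context_t ctx, unsigned int nframes)
{
	unsigned int done = 0;
	int id;

	camera_start_video_capture(ctx, 0);			/* start filling block 0 */

	while (done < nframes) {
		id = camera_get_first_frame_buffer_id(ctx);	/* -1 means the ring is empty */
		if (id < 0)
			continue;				/* real code would sleep on an interrupt */

		example_consume_frame(camera_get_frame_buffer_addr(ctx, id),
				      ctx->block_size);
		camera_release_frame_buffer(ctx, id);		/* hand the block back to the driver */
		done++;
	}

	camera_stop_video_capture(ctx);
	camera_deinit(ctx);
}
#endif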