
📄 camerapdd.cpp

📁 Camera control driver for WinCE 6.0 on the PXA270 platform
💻 CPP
bool CCameraPdd::StopHWCapture( )
{
    if (m_bCameraHWRunning)
    {
        // Mainstone II: Stop streaming video out of sensor
        DEBUGMSG( ZONE_WARN, ( _T("StopHWCapture(%08x): stop camera sensor\r\n"), this ) );

        CameraStopVideoCapture();

        m_bCameraHWRunning = false;
    }

    return true;
}

        
bool CCameraPdd::CaptureHWStillImage( )
{
    // Mainstone II: Grab still image out of sensor
    DEBUGMSG( ZONE_WARN, ( _T("CaptureHWStillImage(%08x): grab still image from sensor\r\n"), this ) );

    // Submit the h/w dma buffers to the camera driver.
    CameraSubmitBuffer(&m_CameraHWStillBuffer, STILL_CAPTURE_BUFFER);

//    m_CsState[CAPTURE] = CSSTATE_PAUSE;

    CameraCaptureStillImage();

    return true;
}

bool CCameraPdd::RevertCaptureState( void )
{
    SetSensorState(CAPTURE, m_CsPrevState[CAPTURE]);
    return true;
}

void CCameraPdd::CameraHandleVideoFrame ( )
{
    // Call MDD_HandleIO, which will in turn call FillBuffer.
    MDD_HandleIO( m_ppModeContext[CAPTURE], CAPTURE );
    
    // Then resubmit the buffer to the hardware for the next frame.
    CameraSubmitBuffer(&m_CameraHWVideoBuffers[m_ulCurrentFrame], VIDEO_CAPTURE_BUFFER);

    // Maintain the frame counter
    m_ulCurrentFrame = (m_ulCurrentFrame + 1) % MAX_HW_FRAMES;
}
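
// Illustrative sketch (not part of the original driver): the video DMA buffers
// are recycled round-robin. After MDD_HandleIO drains the frame at
// m_ulCurrentFrame through FillBuffer, that same buffer is resubmitted to the
// hardware and the index wraps modulo MAX_HW_FRAMES. A hypothetical stand-alone
// version of the index rotation, assuming ulFrameCount buffers were submitted
// up front:
static ULONG AdvanceFrameIndex(ULONG ulCurrent, ULONG ulFrameCount)
{
    // The just-consumed buffer becomes the newest DMA target; the next index
    // is the buffer the hardware will complete next.
    return (ulCurrent + 1) % ulFrameCount;
}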

void CCameraPdd::CameraHandleStillFrame ( )
{
    // Call MDD_HandleIO, which will in turn call FillBuffer.
    MDD_HandleIO( m_ppModeContext[STILL], STILL );
    m_bStillInProgress = FALSE;

    // As this function is called in response to TakeStillPicture,
    // it should restore the capture state.

    RevertCaptureState();
}


DWORD CCameraPdd::FillBuffer( ULONG ulModeType, PUCHAR pImage )
{
    DWORD dwRet = 0;
    // This function is called in response to MDD_HandleIO.

    // Any processing that is required (e.g. color conversion) should
    // be done here.
    PCS_VIDEOINFOHEADER pCsVideoInfoHdr = &m_CurrentFormat[ulModeType].VideoInfoHeader;
    
    
    ASSERT(pCsVideoInfoHdr->bmiHeader.biSizeImage != 0);

    // MDD will make sure that the buffer is sufficient for the image.   

    if (ulModeType == CAPTURE)
    {
        dwRet = pCsVideoInfoHdr->bmiHeader.biSizeImage; 
        CUYVYToYV12Convertor((BYTE *)m_CameraHWVideoBuffers[m_ulCurrentFrame].VirtAddr, (BYTE *)pImage);
    }
    else if (ulModeType == STILL)
    {
        dwRet = pCsVideoInfoHdr->bmiHeader.biSizeImage; 
        memcpy(pImage, (void *)m_CameraHWStillBuffer.VirtAddr, dwRet);
    }

    // return the size of the image filled
    return dwRet;
}
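
// Illustrative sketch (not part of the original driver): FillBuffer relies on
// bmiHeader.biSizeImage matching the pin's output format. Assuming the CAPTURE
// pin delivers planar YV12 (12 bits per pixel) and the STILL pin delivers the
// sensor's packed UYVY (16 bits per pixel), the sizes it returns work out as in
// this hypothetical helper:
static DWORD SampleImageSize(LONG lWidth, LONG lHeight, bool bPlanarYV12)
{
    // YV12: full-resolution Y plane plus quarter-resolution V and U planes.
    // UYVY: two bytes per pixel, packed.
    return bPlanarYV12 ? (DWORD)((lWidth * lHeight * 3) / 2)
                       : (DWORD)(lWidth * lHeight * 2);
}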


void CCameraPdd::SuspendCamera( )
{
    if(m_bCameraHWRunning)
    {
        m_bCameraWasRunning = TRUE;
    }

    PauseCapture();
    StopHWCapture();
    CameraSleep();
}

void CCameraPdd::ResumeCamera( )
{
    // Restart camera sensor if it was running before
    if ( m_bCameraWasRunning )
    {
        CameraResume();
        RevertCaptureState();
        m_bCameraWasRunning = FALSE;
        StartHWCapture();
    }
}

void CameraVideoFrameCallback( DWORD dwContext )
{
    CCameraPdd * pCamDevice = reinterpret_cast<CCameraPdd *>( dwContext );
    
    // Video frame is ready - put it into stream
    
    if (NULL != pCamDevice)
    {
        pCamDevice->CameraHandleVideoFrame();
    }
}

void CameraStillFrameCallback( DWORD dwContext )
{
    CCameraPdd * pCamDevice = reinterpret_cast<CCameraPdd *>( dwContext );
    
    // Still image frame is ready - put it into stream

    if (NULL != pCamDevice)
    {
        pCamDevice->CameraHandleStillFrame();
    }
}

extern "C"
int SensorInitPlatform(
    P_XLLP_Camera_Context_T pCameraContext,
    P_XLLP_Camera_DMA_Context_T pDmaContext
    )
{
    PHYSICAL_ADDRESS PA;
    MAINSTONEII_BLR_REGS        *pBLR;

    RETAILMSG(1, (TEXT("SensorInitPlatform\r\n")));

    // Mainstone II specific memory allocation
    PA.QuadPart  = MAINSTONEII_BASE_REG_PA_FPGA;
    pBLR = (MAINSTONEII_BLR_REGS *) MmMapIoSpace(PA, sizeof(MAINSTONEII_BLR_REGS), FALSE);

    if (pBLR)
    {
        // OS-mapped register base address
        pCameraContext->board_reg_base = (unsigned int)pBLR;
    }
    else
    {
        return FALSE;
    }

    // Camera sensor specific information (Agilent ADCM2650)
    pCameraContext->sensor_type = XLLP_CAMERA_TYPE_ADCM_2650; 

    pCameraContext->camera_functions->init = CameraFuncADCM2650Init;
    pCameraContext->camera_functions->deinit = CameraFuncADCM2650DeInit;
    pCameraContext->camera_functions->set_capture_format = CameraFuncADCM2650SetCaptureFormat;
    pCameraContext->camera_functions->start_capture = CameraFuncADCM2650StartCapture;
    pCameraContext->camera_functions->stop_capture = CameraFuncADCM2650StopCapture;

    // capture image info
    pCameraContext->capture_width = DEFAULT_VIDEO_WIDTH;
    pCameraContext->capture_height = DEFAULT_VIDEO_HEIGHT;
    pCameraContext->Video_capture_width = DEFAULT_VIDEO_WIDTH;
    pCameraContext->Video_capture_height = DEFAULT_VIDEO_HEIGHT;
    pCameraContext->Still_capture_width = DEFAULT_STILL_WIDTH;
    pCameraContext->Still_capture_height = DEFAULT_STILL_HEIGHT;
    pCameraContext->sensor_flip_mode = XLLP_CAMERA_VIDEO_FLIP_VERTICAL |
                                       XLLP_CAMERA_STILL_FLIP_HORIZONTAL;
    pCameraContext->Video_capture_input_format = DEFAULT_CAMERA_VIDEO_INPUT_FORMAT;
    pCameraContext->Video_capture_output_format = DEFAULT_CAMERA_VIDEO_OUTPUT_FORMAT;

    pCameraContext->Still_capture_input_format = DEFAULT_CAMERA_STILL_INPUT_FORMAT;
    pCameraContext->Still_capture_output_format = DEFAULT_CAMERA_STILL_OUTPUT_FORMAT;

    // Assign the DMA channels to camera sensor
    pDmaContext->dma_channels[0] = XLLP_DMAC_CHANNEL_16;
    pDmaContext->dma_channels[1] = XLLP_DMAC_CHANNEL_17;
    pDmaContext->dma_channels[2] = XLLP_DMAC_CHANNEL_18;

    return TRUE;
}

extern "C"
int SensorDeinitPlatform(
    P_XLLP_Camera_Context_T pCameraContext,
    P_XLLP_Camera_DMA_Context_T pDmaContext
    )
{
    pDmaContext->dma_channels[0] = (XLLP_DMAC_CHANNEL_T)0xFF;
    pDmaContext->dma_channels[1] = (XLLP_DMAC_CHANNEL_T)0xFF;
    pDmaContext->dma_channels[2] = (XLLP_DMAC_CHANNEL_T)0xFF;

    if(pCameraContext->board_reg_base)
    {
        MmUnmapIoSpace((void *)pCameraContext->board_reg_base, sizeof(MAINSTONEII_BLR_REGS));
        pCameraContext->board_reg_base = 0;
    }
    return TRUE;
}
// This routine converts UYVY into YV12. It is used only for the CAPTURE pin.


struct UYVYMacroPixel
{
    BYTE U0;
    BYTE Y0;
    BYTE V0;
    BYTE Y1;
};


DWORD CCameraPdd::CUYVYToYV12Convertor(BYTE *pInput, BYTE *pOutput)
{
    PCS_VIDEOINFOHEADER pCsVideoInfoHdr = &m_CurrentFormat[CAPTURE].VideoInfoHeader;

    ASSERT(pCsVideoInfoHdr != NULL);

    LONG lHeight = abs(pCsVideoInfoHdr->bmiHeader.biHeight);
    LONG lWidth = pCsVideoInfoHdr->bmiHeader.biWidth;


    LONG SrcStride = WIDTHBYTES(lWidth * 16); // Input is 16-bit packed YUV (UYVY)
    LONG DstStride = lWidth;                  // Output is planar, so the Y stride equals the width
    
    LONG lSrcDelta = SrcStride - (lWidth * 2);
    LONG lDstDelta = 0; 
    

    if (0 == lHeight)
        return ERROR_SUCCESS;

    BYTE *pYUY2Line1 = pInput;
    BYTE *pYUY2Line2;

    BYTE *pDstYLine1 = pOutput;
    BYTE *pDstYLine2 = pOutput + DstStride;
    BYTE *pDstV = pOutput + (DstStride * lHeight);
    BYTE *pDstU = pDstV + ((DstStride * lHeight) >> 2);

    do {
        // YUY2 images can have odd height
        pYUY2Line2 = (1 == lHeight) ? pYUY2Line1 
                                    : (pYUY2Line1 + SrcStride);
        pDstYLine2 = pDstYLine1 + DstStride;

        // add two because we pre-decrement in the while loop below
        LONG lDecWidth = lWidth + 2;

        // read/process one macropixel (2 pixels) at a time
        while (lDecWidth -= 2)
        {
            UYVYMacroPixel mpSrc1, mpSrc2;
            mpSrc1.U0 = *pYUY2Line1++;
            mpSrc1.Y0 = *pYUY2Line1++;
            mpSrc1.V0 = *pYUY2Line1++;
            mpSrc1.Y1 = *pYUY2Line1++;
            mpSrc2.U0 = *pYUY2Line2++;
            mpSrc2.Y0 = *pYUY2Line2++;
            mpSrc2.V0 = *pYUY2Line2++;
            mpSrc2.Y1 = *pYUY2Line2++;

            // Y plane
            *pDstYLine1++ = mpSrc1.Y0;
            *pDstYLine1++ = mpSrc1.Y1;
            *pDstYLine2++ = mpSrc2.Y0;
            *pDstYLine2++ = mpSrc2.Y1;

            // V plane
            *pDstV++ = (mpSrc1.V0 + mpSrc2.V0) >> 1;

            // U plane
            *pDstU++ = (mpSrc1.U0 + mpSrc2.U0) >> 1;
        }

        pYUY2Line1 = pYUY2Line2 + lSrcDelta;
        // pYUY2Line2 gets set at the top of the loop, based on pYUY2Line1
        pDstYLine1 = pDstYLine2 + lDstDelta;
        // pDstYLine2 gets set at the top of the loop, based on pDstYLine1
        pDstV      += lDstDelta >> 1;
        pDstU      += lDstDelta >> 1;
    } while ((lHeight -= 2) > 0); // compare against zero so an odd height still terminates

    return ERROR_SUCCESS;
}
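
// Illustrative sketch (not part of the original driver): the converter above
// emits the three YV12 planes back to back: the full-resolution Y plane, then
// the quarter-resolution V plane, then the quarter-resolution U plane. A
// hypothetical helper that locates the planes in a converted frame, assuming
// even width and height:
struct YV12Planes
{
    BYTE *pY;
    BYTE *pV;
    BYTE *pU;
};

static YV12Planes LocateYV12Planes(BYTE *pFrame, LONG lWidth, LONG lHeight)
{
    YV12Planes planes;
    planes.pY = pFrame;                                // lWidth * lHeight bytes
    planes.pV = planes.pY + (lWidth * lHeight);        // (lWidth / 2) * (lHeight / 2) bytes
    planes.pU = planes.pV + ((lWidth * lHeight) >> 2); // (lWidth / 2) * (lHeight / 2) bytes
    return planes;
}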

