// t264enc.cpp — T264 DirectShow decoder (CT264Dec) and splitter/parser
// (CT264Splitter / CT264Parser) filter implementations.
}
else
{ // an unexpected error has occured...
return E_UNEXPECTED;
}
// Copy the discontinuity property
hr = pSource->IsDiscontinuity();
if(hr == S_OK)
{
pDest->SetDiscontinuity(TRUE);
}
else if(hr == S_FALSE)
{
pDest->SetDiscontinuity(FALSE);
}
else
{ // an unexpected error has occured...
return E_UNEXPECTED;
}
// Copy the actual data length
long lDataLength = pSource->GetActualDataLength();
pDest->SetActualDataLength(lDataLength);
return NOERROR;
} // Copy
//
// CheckInputType
//
// Accept VSSH- or T264-compressed video described by either a
// VIDEOINFOHEADER or a VIDEOINFOHEADER2 format block. On acceptance the
// frame dimensions and timing are cached in the filter members.
//
// mtIn - proposed input media type (must not be NULL)
// Returns NOERROR when accepted, E_INVALIDARG otherwise.
//
HRESULT CT264Dec::CheckInputType(const CMediaType *mtIn)
{
    CheckPointer(mtIn,E_POINTER);
    if(*mtIn->FormatType() == FORMAT_VideoInfo)
    {
        // The subtype is deliberately not checked (a CLSID_T264SUBTYPE test
        // was disabled in the original source); the FOURCC check suffices.
        if(mtIn->FormatLength() < sizeof(VIDEOINFOHEADER))
            return E_INVALIDARG;
        // Cast matches the length validated above (previously VIDEOINFO*,
        // a larger structure than we actually verified).
        VIDEOINFOHEADER *pInput = (VIDEOINFOHEADER *) mtIn->Format();
        // 'HSSV' / '462T' are the byte-swapped FOURCCs "VSSH" and "T264".
        if (pInput->bmiHeader.biCompression == 'HSSV' || pInput->bmiHeader.biCompression == '462T')
        {
            m_nWidth = pInput->bmiHeader.biWidth;
            m_nHeight = pInput->bmiHeader.biHeight;
            m_avgFrameTime = pInput->AvgTimePerFrame;
            // Frames per second, rounded to the nearest integer; guard the
            // division against a zero frame duration (previously UB).
            m_framerate = (pInput->AvgTimePerFrame != 0)
                ? (float)(INT)((float)10000000 / pInput->AvgTimePerFrame + 0.5)
                : 0.0f;
            return NOERROR;
        }
    }
    else if (*mtIn->FormatType() == FORMAT_VideoInfo2)
    {
        if(mtIn->FormatLength() < sizeof(VIDEOINFOHEADER2))
            return E_INVALIDARG;
        VIDEOINFOHEADER2 *pInput = (VIDEOINFOHEADER2*) mtIn->Format();
        if (pInput->bmiHeader.biCompression == 'HSSV' || pInput->bmiHeader.biCompression == '462T')
        {
            m_nWidth = pInput->bmiHeader.biWidth;
            m_nHeight = pInput->bmiHeader.biHeight;
            m_avgFrameTime = pInput->AvgTimePerFrame;
            m_framerate = (pInput->AvgTimePerFrame != 0)
                ? (float)(INT)((float)10000000 / pInput->AvgTimePerFrame + 0.5)
                : 0.0f;
            return NOERROR;
        }
    }
    return E_INVALIDARG;
} // CheckInputType
//
// CheckTransform
//
// Verify an input/output media type pairing: the input must pass
// CheckInputType and the output must be a plain VIDEOINFOHEADER type.
// On success the negotiated output stride is cached in m_nStride.
//
HRESULT CT264Dec::CheckTransform(const CMediaType *mtIn,const CMediaType *mtOut)
{
    CheckPointer(mtIn,E_POINTER);
    CheckPointer(mtOut,E_POINTER);
    HRESULT hr;
    // The input side must still be acceptable on its own.
    if(FAILED(hr = CheckInputType(mtIn)))
    {
        return hr;
    }
    // Only plain VIDEOINFOHEADER output formats are produced.
    if(*mtOut->FormatType() != FORMAT_VideoInfo)
    {
        return E_INVALIDARG;
    }
    // Both format blocks must be large enough before dereferencing.
    if(mtIn->FormatLength() < sizeof(VIDEOINFOHEADER) ||
       mtOut->FormatLength() < sizeof(VIDEOINFOHEADER))
        return E_INVALIDARG;
    // Cast matches the length validated above (previously VIDEOINFO*,
    // a larger structure than we actually verified).
    VIDEOINFOHEADER* pInfo = (VIDEOINFOHEADER*)mtOut->Format();
    // biWidth on the output side carries the stride the renderer asked for.
    m_nStride = pInfo->bmiHeader.biWidth;
    return NOERROR;
} // CheckTransform
//
// InitOutMediaType
//
// Build the one output type offered by the decoder: YV12 video with the
// cached input dimensions and frame duration.
//
// pmt - media type to initialize (must not be NULL)
// Returns S_OK, or E_OUTOFMEMORY if the format buffer cannot be allocated.
//
HRESULT CT264Dec::InitOutMediaType(CMediaType* pmt)
{
    CheckPointer(pmt,E_POINTER);
    pmt->InitMediaType();
    pmt->SetType(&MEDIATYPE_Video);
    pmt->SetSubtype(&MEDIASUBTYPE_YV12);
    VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER*)pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
    // AllocFormatBuffer returns NULL on allocation failure; the original
    // code passed the result straight into ZeroMemory (NULL dereference).
    if (pvi == NULL)
    {
        return E_OUTOFMEMORY;
    }
    ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
    pvi->AvgTimePerFrame = m_avgFrameTime;
    pvi->bmiHeader.biCompression = '21VY';   // byte-swapped FOURCC "YV12"
    pvi->bmiHeader.biBitCount = 12;          // YV12 is 12 bits per pixel
    pvi->bmiHeader.biPlanes = 1;
    pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    pvi->bmiHeader.biWidth = m_nWidth;
    pvi->bmiHeader.biHeight = m_nHeight;
    pvi->bmiHeader.biSizeImage = GetBitmapSize(&pvi->bmiHeader);
    SetRectEmpty(&(pvi->rcSource));
    SetRectEmpty(&(pvi->rcTarget));
    pmt->SetFormatType(&FORMAT_VideoInfo);
    pmt->SetVariableSize();
    // NOTE(review): temporal compression on an uncompressed YV12 output
    // looks wrong — confirm downstream filters tolerate it before changing.
    pmt->SetTemporalCompression(true);
    return S_OK;
}
//
// DecideBufferSize
//
// Tell the output pin's allocator what size buffers we require.
// Can only do this once the input pin is connected, since the buffer
// size depends on the negotiated frame dimensions.
//
HRESULT CT264Dec::DecideBufferSize(IMemAllocator *pAlloc,ALLOCATOR_PROPERTIES *pProperties)
{
    CheckPointer(pAlloc,E_POINTER);
    CheckPointer(pProperties,E_POINTER);
    // No sensible size can be chosen before the input is connected.
    if(m_pInput->IsConnected() == FALSE)
    {
        return E_UNEXPECTED;
    }
    // One 12bpp frame: width*height luma bytes plus half that for chroma.
    pProperties->cBuffers = 1;
    pProperties->cbBuffer = m_nWidth * m_nHeight + (m_nWidth * m_nHeight >> 1);
    ASSERT(pProperties->cbBuffer);
    // Without fixed-size samples, enforce a guessed minimum buffer size.
    if(!m_pOutput->CurrentMediaType().bFixedSizeSamples &&
       pProperties->cbBuffer < OUTPIN_BUFFER_SIZE)
    {
        pProperties->cbBuffer = OUTPIN_BUFFER_SIZE;
    }
    // SetProperties may return NOERROR yet grant less memory than asked
    // for, so the granted values must be validated explicitly.
    ALLOCATOR_PROPERTIES propGranted;
    HRESULT hr = pAlloc->SetProperties(pProperties, &propGranted);
    if(FAILED(hr))
    {
        return hr;
    }
    ASSERT(propGranted.cBuffers == 1);
    if(propGranted.cBuffers < pProperties->cBuffers ||
       propGranted.cbBuffer < pProperties->cbBuffer)
    {
        return E_FAIL;
    }
    return NOERROR;
} // DecideBufferSize
//
// GetMediaType
//
// Offer exactly one output type, built from the connected input's
// cached dimensions and timing (see InitOutMediaType).
//
HRESULT CT264Dec::GetMediaType(int iPosition, CMediaType *pMediaType)
{
    // The output type is derived from the input, so require a connection.
    if(m_pInput->IsConnected() == FALSE)
        return E_UNEXPECTED;
    // Negative positions should never be requested.
    if(iPosition < 0)
        return E_INVALIDARG;
    // Only a single type is offered.
    if(iPosition > 0)
        return VFW_S_NO_MORE_ITEMS;
    CheckPointer(pMediaType,E_POINTER);
    InitOutMediaType(pMediaType);
    return NOERROR;
} // GetMediaType
//
// StartStreaming
//
// Create the T264 decoder instance (if not already open) and reset the
// running timestamp before delegating to the base class.
//
HRESULT CT264Dec::StartStreaming()
{
    // Clear the FPU/MMX state up front (presumably the decoder uses MMX
    // instructions — matches the explicit emms here).
    _asm emms
    if (m_t264 == NULL)
    {
        m_t264 = T264dec_open();
        ASSERT(m_t264);
        // Fail the state transition rather than streaming with a NULL
        // decoder; the original only ASSERTed, so a release build would
        // have crashed on the first sample.
        if (m_t264 == NULL)
        {
            return E_OUTOFMEMORY;
        }
    }
    m_time = 0;
    return CTransformFilter::StartStreaming();
}
//
// StopStreaming
//
// Release the decoder instance created in StartStreaming, if any,
// then delegate to the base class.
//
HRESULT CT264Dec::StopStreaming()
{
    T264_t* pDecoder = m_t264;
    m_t264 = 0;
    if (pDecoder != NULL)
    {
        T264dec_close(pDecoder);
    }
    return CTransformFilter::StopStreaming();
}
//////////////////////////////////////////////////////////////////////////
// CT264Splitter
//
// Constructor: initializes the splitter base class and creates the
// single input pin. Failures are reported through phr.
//
CT264Splitter::CT264Splitter(
    LPUNKNOWN pUnk,
    HRESULT *phr) :
    CBaseSplitterFilter(
        TEXT("CT264Splitter"),
        pUnk,
        CLSID_T264Splitter,
        phr)
{
    // Create our input pin; the original ignored allocation failure here,
    // leaving m_pInput NULL with *phr still reporting success.
    m_pInput = new CSplitterInputPin(this, phr);
    if (m_pInput == NULL && phr != NULL && SUCCEEDED(*phr))
    {
        *phr = E_OUTOFMEMORY;
    }
}
//
// CreateInstance
//
// Class-factory entry point. Per the standard DirectShow pattern, a
// failed allocation must be reported through phr.
//
CUnknown *CT264Splitter::CreateInstance(LPUNKNOWN pUnk, HRESULT *phr)
{
    CUnknown *pUnkRet = new CT264Splitter(pUnk, phr);
    // The original returned NULL on allocation failure without setting
    // *phr, so the caller would have seen success with no object.
    if (pUnkRet == NULL && phr != NULL)
    {
        *phr = E_OUTOFMEMORY;
    }
    return pUnkRet;
}
// Override type checking.
// Accept any proposed type: asynchronous source filters propose only a
// null subtype, so the real validation is deferred to CT264Parser::Init,
// which probes the stream content itself.
HRESULT CT264Splitter::CheckInputType(const CMediaType *pmt)
{
/* We'll accept our preferred type or a wild card for the subtype */
/*if (pmt->majortype != MEDIATYPE_Stream ||
pmt->subtype != MEDIASUBTYPE_NULL) {
return S_FALSE;
} else {
return S_OK;
}*/
// Async. source filter just send majortype equal null
return S_OK;
}
// Return the filter-registration data (name, merit, pin descriptions)
// declared elsewhere in the project as subDST264Splitter.
LPAMOVIESETUP_FILTER CT264Splitter::GetSetupData()
{
return (LPAMOVIESETUP_FILTER)&subDST264Splitter;
}
/* Complete connection and instantiate parser
   This involves:
   Instantiate the parser for the type and have it check the format.
   pNotify - parser event sink
   pType   - stream media type (currently unused by this parser)
   Returns the new parser, or NULL on allocation/construction failure.
*/
CBaseParser *CT264Splitter::CreateParser(
    CParserNotify *pNotify,
    CMediaType *pType
    )
{
    HRESULT hr = S_OK;
    CT264Parser *pParser = new CT264Parser(pNotify, &hr);
    // The original discarded hr, so a parser whose constructor reported
    // failure was handed back as if it were valid.
    if (pParser != NULL && FAILED(hr))
    {
        delete pParser;
        pParser = NULL;
    }
    return pParser;
}
/* Cheap'n nasty parser - DON'T do yours like this! */
/* Initialize the parser: probe the head of the stream to decide whether
   it is a T264 elementary stream and, if so, create the single video
   output stream with a media type built from the decoded sequence header.
   pRdr - way to read the source medium - can be NULL
   Returns S_OK on success, VFW_E_TYPE_NOT_ACCEPTED when the data does not
   look like T264, or a read/allocation failure code.
*/
HRESULT CT264Parser::Init(CParseReader *pRdr)
{
const DWORD dwLen = 128;
/* Read the first 128 bytes of the stream and look for a start code */
PBYTE pbData = new BYTE[dwLen];
if (pbData == NULL) {
return E_OUTOFMEMORY;
}
HRESULT hr = pRdr->Read(pbData, dwLen);
if (S_OK != hr) {
delete [] pbData;
return hr;
}
/* Now just loop looking for start codes */
// NOTE(review): dwLeft is computed but never used below.
DWORD dwLeft = dwLen;
int is_t264 = 0;
PBYTE pbCurrent = pbData;
{
// First four bytes of the probe buffer; UNALIGNED keeps the load safe
// on platforms that fault on misaligned access.
DWORD dwCode = *(UNALIGNED DWORD *)pbCurrent;
/* Check if it's a valid start code: bytes 00 00 00 01 in stream order,
   which read as a little-endian DWORD is 0x01000000 */
if (dwCode == 0x01000000)
{
int run = 1;
/* Create the media type from the stream
only support Payload streams for now
*/
VIDEOINFO* pInfo;
CMediaType cmt;
cmt.InitMediaType();
// A throwaway decoder instance, used only to parse the stream headers.
// NOTE(review): T264dec_open's result is not checked for NULL here.
T264_t* t = T264dec_open();
T264dec_buffer(t, pbData, dwLen);
// Drive the decoder state machine until the sequence header is seen
// (DEC_STATE_SEQ clears `run`).
// NOTE(review): if the 128-byte window never produces DEC_STATE_SEQ,
// this loop may not terminate — confirm the decoder's behavior when
// its input buffer runs dry.
while (run)
{
decoder_state_t state = T264dec_parse(t);
switch(state)
{
case DEC_STATE_CUSTOM_SET:
// Custom-data state identifies the stream as genuine T264.
is_t264 = true;
break;
case DEC_STATE_SEQ:
// Sequence header parsed: width/height are now known, so the
// output media type can be filled in.
run = 0;
cmt.SetType(&MEDIATYPE_Video);
cmt.SetSubtype(&CLSID_T264SUBTYPE);
cmt.SetFormatType(&FORMAT_VideoInfo);
cmt.AllocFormatBuffer(sizeof(VIDEOINFO));
// NOTE(review): AllocFormatBuffer's result is not checked for NULL.
ZeroMemory(cmt.pbFormat, sizeof(VIDEOINFO));
pInfo = (VIDEOINFO*)cmt.Format();
pInfo->bmiHeader.biWidth = t->width;
pInfo->bmiHeader.biHeight = t->height;
pInfo->bmiHeader.biBitCount = 12;
pInfo->bmiHeader.biCompression = '462T';
pInfo->bmiHeader.biPlanes = 1;
pInfo->bmiHeader.biSizeImage = GetBitmapSize(&pInfo->bmiHeader);
SetRectEmpty(&(pInfo->rcSource));
SetRectEmpty(&(pInfo->rcTarget));
cmt.SetVariableSize();
cmt.SetTemporalCompression(true);
// Frame duration in 100ns units: 400000 (25fps) or 333333 (~30fps).
// NOTE(review): keying frame rate off aspect_ratio looks suspicious —
// confirm the decoder field's actual semantics.
if (t->aspect_ratio == 2)
pInfo->AvgTimePerFrame = 400000;
else
pInfo->AvgTimePerFrame = 333333;
break;
case DEC_STATE_SLICE:
case DEC_STATE_BUFFER:
// Neither state should be reachable while only headers are parsed.
ASSERT(false);
break;
case DEC_STATE_PIC:
break;
default:
/* do not care */
break;
}
};
T264dec_close(t);
/* Create our video stream and advertise the type we just built */
m_pNotify->CreateStream(L"Video", &m_Video.m_pNotify);
m_Video.m_pNotify->AddMediaType(&cmt);
}
}
delete [] pbData;
if (!is_t264 || !m_Video.Initialized())
{
return VFW_E_TYPE_NOT_ACCEPTED;
}
else
{
return S_OK;
}
}
/* Report the preferred allocator properties for this content:
   four buffers of 32KB each.
   plSize  - receives the preferred buffer size in bytes
   plCount - receives the preferred buffer count
*/
void CT264Parser::GetSizeAndCount(LONG *plSize, LONG *plCount)
{
    *plCount = 4;
    *plSize = 32768;
}
/* Call this to reinitialize for a new stream */
void CT264Parser::StreamReset()
{
// Intentionally empty: this parser keeps no per-buffer parse state
// (Process forwards data unmodified), so there is nothing to reset.
}
/* Call this to pass new stream data:
   pbData      - pointer to data
   lData       - length of data
   plProcessed - amount of data consumed
*/
HRESULT CT264Parser::Process(
    const BYTE * pbData,
    LONG lData,
    LONG *plProcessed
    )
{
    // No framing is attempted here: the buffer is handed downstream
    // untouched and the T264 decoder performs its own start-code parsing.
    // (An earlier start-code-splitting implementation was disabled in the
    // original source and has been removed; see source history.)
    m_Video.m_pNotify->SendSample(pbData, lData, 0, false);
    // The whole buffer is always reported as consumed.
    *plProcessed = lData;
    return S_OK;
}
// End of t264enc.cpp